/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/
#include <linux/prefetch.h>
#include <net/busy_poll.h>
#include "i40e.h"
#include "i40e_prototype.h"
static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
				u32 td_tag)
{
	return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
			   ((u64)td_cmd  << I40E_TXD_QW1_CMD_SHIFT) |
			   ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
			   ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
			   ((u64)td_tag  << I40E_TXD_QW1_L2TAG1_SHIFT));
}
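/* Note: build_ctob() assembles the data descriptor's second quadword (QW1).
 * As implied by the shifts used above (exact field widths live in the
 * register definitions and are not restated here), the quadword packs the
 * descriptor type, command bits, offsets, buffer size and L2 tag into a
 * single 64-bit little-endian value that the hardware consumes directly.
 */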
#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)

#define I40E_FD_CLEAN_DELAY 10
/**
 * i40e_program_fdir_filter - Program a Flow Director filter
 * @fdir_data: Packet data that will be filter parameters
 * @raw_packet: the pre-allocated packet buffer for FDir
 * @pf: The PF pointer
 * @add: True for add/update, False for remove
 **/
int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
			     struct i40e_pf *pf, bool add)
{
	struct i40e_filter_program_desc *fdir_desc;
	struct i40e_tx_buffer *tx_buf, *first;
	struct i40e_tx_desc *tx_desc;
	struct i40e_ring *tx_ring;
	unsigned int fpt, dcc;
	struct i40e_vsi *vsi;
	struct device *dev;
	dma_addr_t dma;
	u32 td_cmd = 0;
	u16 delay = 0;
	u16 i;
	/* find existing FDIR VSI */
	vsi = NULL;
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
			vsi = pf->vsi[i];
	if (!vsi)
		return -ENOENT;

	tx_ring = vsi->tx_rings[0];
	dev = tx_ring->dev;

	/* we need two descriptors to add/del a filter and we can wait */
	do {
		if (I40E_DESC_UNUSED(tx_ring) > 1)
			break;
		msleep_interruptible(1);
		delay++;
	} while (delay < I40E_FD_CLEAN_DELAY);

	if (!(I40E_DESC_UNUSED(tx_ring) > 1))
		return -EAGAIN;

	dma = dma_map_single(dev, raw_packet,
			     I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		goto dma_fail;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
	first = &tx_ring->tx_bi[i];
	memset(first, 0, sizeof(struct i40e_tx_buffer));

	tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;
	fpt = (fdir_data->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
	      I40E_TXD_FLTR_QW0_QINDEX_MASK;

	fpt |= (fdir_data->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
	       I40E_TXD_FLTR_QW0_FLEXOFF_MASK;

	fpt |= (fdir_data->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
	       I40E_TXD_FLTR_QW0_PCTYPE_MASK;

	/* Use LAN VSI Id if not programmed by user */
	if (fdir_data->dest_vsi == 0)
		fpt |= (pf->vsi[pf->lan_vsi]->id) <<
		       I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
	else
		fpt |= ((u32)fdir_data->dest_vsi <<
			I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
		       I40E_TXD_FLTR_QW0_DEST_VSI_MASK;
	dcc = I40E_TX_DESC_DTYPE_FILTER_PROG;

	if (add)
		dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
		       I40E_TXD_FLTR_QW1_PCMD_SHIFT;
	else
		dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
		       I40E_TXD_FLTR_QW1_PCMD_SHIFT;

	dcc |= (fdir_data->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT) &
	       I40E_TXD_FLTR_QW1_DEST_MASK;

	dcc |= (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
	       I40E_TXD_FLTR_QW1_FD_STATUS_MASK;

	if (fdir_data->cnt_index != 0) {
		dcc |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
		dcc |= ((u32)fdir_data->cnt_index <<
			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
		       I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
	}

	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(fpt);
	fdir_desc->rsvd = cpu_to_le32(0);
	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc);
	fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id);
	/* Now program a dummy descriptor */
	i = tx_ring->next_to_use;
	tx_desc = I40E_TX_DESC(tx_ring, i);
	tx_buf = &tx_ring->tx_bi[i];

	tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;

	memset(tx_buf, 0, sizeof(struct i40e_tx_buffer));

	/* record length, and DMA address */
	dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
	dma_unmap_addr_set(tx_buf, dma, dma);

	tx_desc->buffer_addr = cpu_to_le64(dma);
	td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;

	tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB;
	tx_buf->raw_buf = (void *)raw_packet;

	tx_desc->cmd_type_offset_bsz =
		build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.
	 */
	wmb();

	/* Mark the data descriptor to be watched */
	first->next_to_watch = tx_desc;

	writel(tx_ring->next_to_use, tx_ring->tail);
	return 0;

dma_fail:
	return -1;
}
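/* Programming a sideband Flow Director filter therefore consumes two
 * descriptors on the dedicated FDIR Tx ring: the filter programming
 * descriptor built above plus a "dummy" data descriptor that carries the
 * pre-built raw packet the hardware parses for the flow fields, which is
 * why the wait loop earlier insists on more than one free descriptor.
 */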
#define IP_HEADER_OFFSET 14
#define I40E_UDPIP_DUMMY_PACKET_LEN 42
/**
 * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_filter *fd_data,
				   bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct udphdr *udp;
	struct iphdr *ip;
	bool err = false;
	u8 *raw_packet;
	int ret;
203 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
204 0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
205 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);
212 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
213 udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
214 + sizeof(struct iphdr));
216 ip->daddr = fd_data->dst_ip[0];
217 udp->dest = fd_data->dst_port;
218 ip->saddr = fd_data->src_ip[0];
219 udp->source = fd_data->src_port;
221 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
222 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		err = true;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev,
				 "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}
	if (err)
		kfree(raw_packet);

	return err ? -EOPNOTSUPP : 0;
}
244 #define I40E_TCPIP_DUMMY_PACKET_LEN 54
246 * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
247 * @vsi: pointer to the targeted VSI
248 * @fd_data: the flow director data required for the FDir descriptor
249 * @add: true adds a filter, false removes it
251 * Returns 0 if the filters were successfully added or removed
253 static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
254 struct i40e_fdir_filter *fd_data,
257 struct i40e_pf *pf = vsi->back;
264 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
265 0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
266 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
267 0x0, 0x72, 0, 0, 0, 0};
269 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
272 memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);
274 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
275 tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
276 + sizeof(struct iphdr));
278 ip->daddr = fd_data->dst_ip[0];
279 tcp->dest = fd_data->dst_port;
280 ip->saddr = fd_data->src_ip[0];
281 tcp->source = fd_data->src_port;
285 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
286 if (I40E_DEBUG_FD & pf->hw.debug_mask)
287 dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
288 pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
291 pf->fd_tcp_rule = (pf->fd_tcp_rule > 0) ?
292 (pf->fd_tcp_rule - 1) : 0;
293 if (pf->fd_tcp_rule == 0) {
294 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
295 if (I40E_DEBUG_FD & pf->hw.debug_mask)
296 dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
300 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
301 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		err = true;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}

	if (err)
		kfree(raw_packet);

	return err ? -EOPNOTSUPP : 0;
}
325 * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
326 * a specific flow spec
327 * @vsi: pointer to the targeted VSI
328 * @fd_data: the flow director data required for the FDir descriptor
329 * @add: true adds a filter, false removes it
331 * Returns 0 if the filters were successfully added or removed
static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
				    struct i40e_fdir_filter *fd_data,
				    bool add)
{
	return -EOPNOTSUPP;
}
340 #define I40E_IP_DUMMY_PACKET_LEN 34
342 * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
343 * a specific flow spec
344 * @vsi: pointer to the targeted VSI
345 * @fd_data: the flow director data required for the FDir descriptor
346 * @add: true adds a filter, false removes it
348 * Returns 0 if the filters were successfully added or removed
350 static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
351 struct i40e_fdir_filter *fd_data,
354 struct i40e_pf *pf = vsi->back;
360 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
361 0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
364 for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
365 i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
366 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
369 memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
370 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
372 ip->saddr = fd_data->src_ip[0];
373 ip->daddr = fd_data->dst_ip[0];
377 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
380 dev_info(&pf->pdev->dev,
381 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
382 fd_data->pctype, fd_data->fd_id, ret);
384 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
386 dev_info(&pf->pdev->dev,
387 "Filter OK for PCTYPE %d loc = %d\n",
388 fd_data->pctype, fd_data->fd_id);
390 dev_info(&pf->pdev->dev,
391 "Filter deleted for PCTYPE %d loc = %d\n",
392 fd_data->pctype, fd_data->fd_id);
399 return err ? -EOPNOTSUPP : 0;
403 * i40e_add_del_fdir - Build raw packets to add/del fdir filter
404 * @vsi: pointer to the targeted VSI
405 * @cmd: command to get or set RX flow classification rules
406 * @add: true adds a filter, false removes it
409 int i40e_add_del_fdir(struct i40e_vsi *vsi,
410 struct i40e_fdir_filter *input, bool add)
412 struct i40e_pf *pf = vsi->back;
415 switch (input->flow_type & ~FLOW_EXT) {
417 ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
420 ret = i40e_add_del_fdir_udpv4(vsi, input, add);
423 ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
426 ret = i40e_add_del_fdir_ipv4(vsi, input, add);
429 switch (input->ip4_proto) {
431 ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
434 ret = i40e_add_del_fdir_udpv4(vsi, input, add);
437 ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
440 ret = i40e_add_del_fdir_ipv4(vsi, input, add);
445 dev_info(&pf->pdev->dev, "Could not specify spec type %d\n",
450 /* The buffer allocated here is freed by the i40e_clean_tx_ring() */
455 * i40e_fd_handle_status - check the Programming Status for FD
456 * @rx_ring: the Rx ring for this descriptor
457 * @rx_desc: the Rx descriptor for programming Status, not a packet descriptor.
458 * @prog_id: the id originally used for programming
460 * This is used to verify if the FD programming or invalidation
461 * requested by SW to the HW is successful or not and take actions accordingly.
463 static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
464 union i40e_rx_desc *rx_desc, u8 prog_id)
466 struct i40e_pf *pf = rx_ring->vsi->back;
467 struct pci_dev *pdev = pf->pdev;
468 u32 fcnt_prog, fcnt_avail;
472 qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
473 error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
474 I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
476 if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
477 pf->fd_inv = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fd_id);
478 if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
479 (I40E_DEBUG_FD & pf->hw.debug_mask))
480 dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
483 /* Check if the programming error is for ATR.
484 * If so, auto disable ATR and set a state for
485 * flush in progress. Next time we come here if flush is in
486 * progress do nothing, once flush is complete the state will
489 if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
493 /* store the current atr filter count */
494 pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);
496 if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
497 (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
498 pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
499 set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
502 /* filter programming failed most likely due to table full */
503 fcnt_prog = i40e_get_global_fd_count(pf);
504 fcnt_avail = pf->fdir_pf_filter_count;
505 /* If ATR is running fcnt_prog can quickly change,
506 * if we are very close to full, it makes sense to disable
507 * FD ATR/SB and then re-enable it when there is room.
509 if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
510 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
511 !(pf->auto_disable_flags &
512 I40E_FLAG_FD_SB_ENABLED)) {
513 if (I40E_DEBUG_FD & pf->hw.debug_mask)
514 dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
515 pf->auto_disable_flags |=
516 I40E_FLAG_FD_SB_ENABLED;
519 } else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
520 if (I40E_DEBUG_FD & pf->hw.debug_mask)
521 dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
522 rx_desc->wb.qword0.hi_dword.fd_id);
527 * i40e_unmap_and_free_tx_resource - Release a Tx buffer
528 * @ring: the ring that owns the buffer
529 * @tx_buffer: the buffer to free
static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
					    struct i40e_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}

	if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
		kfree(tx_buffer->raw_buf);

	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}
558 * i40e_clean_tx_ring - Free any empty Tx buffers
559 * @tx_ring: ring to be cleaned
561 void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
563 unsigned long bi_size;
566 /* ring already cleared, nothing to do */
570 /* Free all the Tx ring sk_buffs */
571 for (i = 0; i < tx_ring->count; i++)
572 i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);
574 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
575 memset(tx_ring->tx_bi, 0, bi_size);
577 /* Zero out the descriptor ring */
578 memset(tx_ring->desc, 0, tx_ring->size);
580 tx_ring->next_to_use = 0;
581 tx_ring->next_to_clean = 0;
583 if (!tx_ring->netdev)
586 /* cleanup Tx queue statistics */
587 netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
588 tx_ring->queue_index));
592 * i40e_free_tx_resources - Free Tx resources per queue
593 * @tx_ring: Tx descriptor ring for a specific queue
595 * Free all transmit software resources
597 void i40e_free_tx_resources(struct i40e_ring *tx_ring)
599 i40e_clean_tx_ring(tx_ring);
600 kfree(tx_ring->tx_bi);
601 tx_ring->tx_bi = NULL;
604 dma_free_coherent(tx_ring->dev, tx_ring->size,
605 tx_ring->desc, tx_ring->dma);
606 tx_ring->desc = NULL;
/**
 * i40e_get_tx_pending - how many tx descriptors not processed
 * @ring: the ring of descriptors
 *
 * Since there is no access to the ring head register
 * in XL710, we need to use our local copies
 **/
u32 i40e_get_tx_pending(struct i40e_ring *ring)
{
	u32 head, tail;

	head = i40e_get_head(ring);
	tail = readl(ring->tail);

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);

	return 0;
}
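/* Example with illustrative values: on a 512-descriptor ring, head = 500
 * and tail = 10 gives 10 + 512 - 500 = 22 descriptors still pending, while
 * head = 10 and tail = 50 is simply 50 - 10 = 40.
 */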
631 #define WB_STRIDE 0x3
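/* With WB_STRIDE of 0x3, the "(pending / (WB_STRIDE + 1)) == 0" check in
 * i40e_clean_tx_irq() below is true only when fewer than four descriptors
 * are still awaiting writeback, which is when the driver arms a forced
 * writeback rather than waiting on the hardware.
 */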
634 * i40e_clean_tx_irq - Reclaim resources after transmit completes
635 * @tx_ring: tx ring to clean
636 * @budget: how many cleans we're allowed
638 * Returns true if there's any budget left (e.g. the clean is finished)
640 static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
642 u16 i = tx_ring->next_to_clean;
643 struct i40e_tx_buffer *tx_buf;
644 struct i40e_tx_desc *tx_head;
645 struct i40e_tx_desc *tx_desc;
646 unsigned int total_packets = 0;
647 unsigned int total_bytes = 0;
649 tx_buf = &tx_ring->tx_bi[i];
650 tx_desc = I40E_TX_DESC(tx_ring, i);
653 tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));
656 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
658 /* if next_to_watch is not set then there is no work pending */
662 /* prevent any other reads prior to eop_desc */
663 read_barrier_depends();
665 /* we have caught up to head, no work left to do */
666 if (tx_head == tx_desc)
669 /* clear next_to_watch to prevent false hangs */
670 tx_buf->next_to_watch = NULL;
672 /* update the statistics for this packet */
673 total_bytes += tx_buf->bytecount;
674 total_packets += tx_buf->gso_segs;
677 dev_consume_skb_any(tx_buf->skb);
679 /* unmap skb header data */
680 dma_unmap_single(tx_ring->dev,
681 dma_unmap_addr(tx_buf, dma),
682 dma_unmap_len(tx_buf, len),
685 /* clear tx_buffer data */
687 dma_unmap_len_set(tx_buf, len, 0);
689 /* unmap remaining buffers */
690 while (tx_desc != eop_desc) {
697 tx_buf = tx_ring->tx_bi;
698 tx_desc = I40E_TX_DESC(tx_ring, 0);
701 /* unmap any remaining paged data */
702 if (dma_unmap_len(tx_buf, len)) {
703 dma_unmap_page(tx_ring->dev,
704 dma_unmap_addr(tx_buf, dma),
705 dma_unmap_len(tx_buf, len),
707 dma_unmap_len_set(tx_buf, len, 0);
711 /* move us one more past the eop_desc for start of next pkt */
717 tx_buf = tx_ring->tx_bi;
718 tx_desc = I40E_TX_DESC(tx_ring, 0);
723 /* update budget accounting */
725 } while (likely(budget));
728 tx_ring->next_to_clean = i;
729 u64_stats_update_begin(&tx_ring->syncp);
730 tx_ring->stats.bytes += total_bytes;
731 tx_ring->stats.packets += total_packets;
732 u64_stats_update_end(&tx_ring->syncp);
733 tx_ring->q_vector->tx.total_bytes += total_bytes;
734 tx_ring->q_vector->tx.total_packets += total_packets;
	if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
		unsigned int j = 0;

		/* check to see if there are < 4 descriptors
		 * waiting to be written back, then kick the hardware to force
		 * them to be written back in case we stay in NAPI.
		 * In this mode on X722 we do not enable Interrupt.
		 */
		j = i40e_get_tx_pending(tx_ring);

		if (budget &&
		    ((j / (WB_STRIDE + 1)) == 0) && (j != 0) &&
		    !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
		    (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
			tx_ring->arm_wb = true;
	}
753 netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
754 tx_ring->queue_index),
755 total_packets, total_bytes);
757 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
758 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
759 (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
760 /* Make sure that anybody stopping the queue after this
761 * sees the new next_to_clean.
764 if (__netif_subqueue_stopped(tx_ring->netdev,
765 tx_ring->queue_index) &&
766 !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) {
767 netif_wake_subqueue(tx_ring->netdev,
768 tx_ring->queue_index);
769 ++tx_ring->tx_stats.restart_queue;
777 * i40e_force_wb - Arm hardware to do a wb on noncache aligned descriptors
778 * @vsi: the VSI we care about
779 * @q_vector: the vector on which to force writeback
782 void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
784 u16 flags = q_vector->tx.ring[0].flags;
786 if (flags & I40E_TXR_FLAGS_WB_ON_ITR) {
789 if (q_vector->arm_wb_state)
792 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
793 val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK |
794 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */
797 I40E_PFINT_DYN_CTLN(q_vector->v_idx +
798 vsi->base_vector - 1),
801 val = I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK |
802 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK; /* set noitr */
804 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
806 q_vector->arm_wb_state = true;
807 } else if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
808 u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
809 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
810 I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
811 I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
812 /* allow 00 to be written to the index */
815 I40E_PFINT_DYN_CTLN(q_vector->v_idx +
816 vsi->base_vector - 1), val);
818 u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
819 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
820 I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
821 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK;
822 /* allow 00 to be written to the index */
824 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
829 * i40e_set_new_dynamic_itr - Find new ITR level
830 * @rc: structure containing ring performance data
832 * Returns true if ITR changed, false if not
834 * Stores a new ITR value based on packets and byte counts during
835 * the last interrupt. The advantage of per interrupt computation
836 * is faster updates and more accurate ITR for the current traffic
837 * pattern. Constants in this function were computed based on
838 * theoretical maximum wire speed and thresholds were set based on
839 * testing data as well as attempting to minimize response time
840 * while increasing bulk throughput.
842 static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
844 enum i40e_latency_range new_latency_range = rc->latency_range;
845 struct i40e_q_vector *qv = rc->ring->q_vector;
846 u32 new_itr = rc->itr;
850 if (rc->total_packets == 0 || !rc->itr)
853 /* simple throttlerate management
854 * 0-10MB/s lowest (50000 ints/s)
855 * 10-20MB/s low (20000 ints/s)
856 * 20-1249MB/s bulk (18000 ints/s)
857 * > 40000 Rx packets per second (8000 ints/s)
859 * The math works out because the divisor is in 10^(-6) which
860 * turns the bytes/us input value into MB/s values, but
861 * make sure to use usecs, as the register values written
862 * are in 2 usec increments in the ITR registers, and make sure
863 * to use the smoothed values that the countdown timer gives us.
865 usecs = (rc->itr << 1) * ITR_COUNTDOWN_START;
866 bytes_per_int = rc->total_bytes / usecs;
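	/* Illustrative numbers only: if the smoothed window works out to
	 * 10,000 usecs and 500,000 bytes arrived in it, bytes_per_int is 50
	 * (roughly 50 MB/s), which nudges the range below toward the bulk
	 * bucket; note the switch only steps one latency range at a time.
	 */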
868 switch (new_latency_range) {
869 case I40E_LOWEST_LATENCY:
870 if (bytes_per_int > 10)
871 new_latency_range = I40E_LOW_LATENCY;
873 case I40E_LOW_LATENCY:
874 if (bytes_per_int > 20)
875 new_latency_range = I40E_BULK_LATENCY;
876 else if (bytes_per_int <= 10)
877 new_latency_range = I40E_LOWEST_LATENCY;
879 case I40E_BULK_LATENCY:
880 case I40E_ULTRA_LATENCY:
882 if (bytes_per_int <= 20)
883 new_latency_range = I40E_LOW_LATENCY;
887 /* this is to adjust RX more aggressively when streaming small
888 * packets. The value of 40000 was picked as it is just beyond
889 * what the hardware can receive per second if in low latency
892 #define RX_ULTRA_PACKET_RATE 40000
894 if ((((rc->total_packets * 1000000) / usecs) > RX_ULTRA_PACKET_RATE) &&
896 new_latency_range = I40E_ULTRA_LATENCY;
898 rc->latency_range = new_latency_range;
900 switch (new_latency_range) {
901 case I40E_LOWEST_LATENCY:
902 new_itr = I40E_ITR_50K;
904 case I40E_LOW_LATENCY:
905 new_itr = I40E_ITR_20K;
907 case I40E_BULK_LATENCY:
908 new_itr = I40E_ITR_18K;
910 case I40E_ULTRA_LATENCY:
911 new_itr = I40E_ITR_8K;
918 rc->total_packets = 0;
920 if (new_itr != rc->itr) {
929 * i40e_clean_programming_status - clean the programming status descriptor
930 * @rx_ring: the rx ring that has this descriptor
931 * @rx_desc: the rx descriptor written back by HW
933 * Flow director should handle FD_FILTER_STATUS to check its filter programming
934 * status being successful or not and take actions accordingly. FCoE should
935 * handle its context/filter programming/invalidation status and take actions.
938 static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
939 union i40e_rx_desc *rx_desc)
944 qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
945 id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
946 I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
948 if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
949 i40e_fd_handle_status(rx_ring, rx_desc, id);
951 else if ((id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS) ||
952 (id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS))
953 i40e_fcoe_handle_status(rx_ring, rx_desc, id);
958 * i40e_setup_tx_descriptors - Allocate the Tx descriptors
959 * @tx_ring: the tx ring to set up
961 * Return 0 on success, negative on error
963 int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
965 struct device *dev = tx_ring->dev;
971 /* warn if we are about to overwrite the pointer */
972 WARN_ON(tx_ring->tx_bi);
973 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
974 tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
978 /* round up to nearest 4K */
979 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
980 /* add u32 for head writeback, align after this takes care of
981 * guaranteeing this is at least one cache line in size
983 tx_ring->size += sizeof(u32);
984 tx_ring->size = ALIGN(tx_ring->size, 4096);
985 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
986 &tx_ring->dma, GFP_KERNEL);
987 if (!tx_ring->desc) {
988 dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
993 tx_ring->next_to_use = 0;
994 tx_ring->next_to_clean = 0;
998 kfree(tx_ring->tx_bi);
999 tx_ring->tx_bi = NULL;
1004 * i40e_clean_rx_ring - Free Rx buffers
1005 * @rx_ring: ring to be cleaned
1007 void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
1009 struct device *dev = rx_ring->dev;
1010 struct i40e_rx_buffer *rx_bi;
1011 unsigned long bi_size;
1014 /* ring already cleared, nothing to do */
1015 if (!rx_ring->rx_bi)
1018 if (ring_is_ps_enabled(rx_ring)) {
1019 int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count;
1021 rx_bi = &rx_ring->rx_bi[0];
1022 if (rx_bi->hdr_buf) {
1023 dma_free_coherent(dev,
1027 for (i = 0; i < rx_ring->count; i++) {
1028 rx_bi = &rx_ring->rx_bi[i];
1030 rx_bi->hdr_buf = NULL;
1034 /* Free all the Rx ring sk_buffs */
1035 for (i = 0; i < rx_ring->count; i++) {
1036 rx_bi = &rx_ring->rx_bi[i];
1038 dma_unmap_single(dev,
1040 rx_ring->rx_buf_len,
1045 dev_kfree_skb(rx_bi->skb);
1049 if (rx_bi->page_dma) {
1054 rx_bi->page_dma = 0;
1056 __free_page(rx_bi->page);
1058 rx_bi->page_offset = 0;
1062 bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
1063 memset(rx_ring->rx_bi, 0, bi_size);
1065 /* Zero out the descriptor ring */
1066 memset(rx_ring->desc, 0, rx_ring->size);
1068 rx_ring->next_to_clean = 0;
1069 rx_ring->next_to_use = 0;
1073 * i40e_free_rx_resources - Free Rx resources
1074 * @rx_ring: ring to clean the resources from
1076 * Free all receive software resources
1078 void i40e_free_rx_resources(struct i40e_ring *rx_ring)
1080 i40e_clean_rx_ring(rx_ring);
1081 kfree(rx_ring->rx_bi);
1082 rx_ring->rx_bi = NULL;
1084 if (rx_ring->desc) {
1085 dma_free_coherent(rx_ring->dev, rx_ring->size,
1086 rx_ring->desc, rx_ring->dma);
1087 rx_ring->desc = NULL;
1092 * i40e_alloc_rx_headers - allocate rx header buffers
1093 * @rx_ring: ring to alloc buffers
1095 * Allocate rx header buffers for the entire ring. As these are static,
1096 * this is only called when setting up a new ring.
1098 void i40e_alloc_rx_headers(struct i40e_ring *rx_ring)
1100 struct device *dev = rx_ring->dev;
1101 struct i40e_rx_buffer *rx_bi;
1107 if (rx_ring->rx_bi[0].hdr_buf)
1109 /* Make sure the buffers don't cross cache line boundaries. */
1110 buf_size = ALIGN(rx_ring->rx_hdr_len, 256);
1111 buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count,
1115 for (i = 0; i < rx_ring->count; i++) {
1116 rx_bi = &rx_ring->rx_bi[i];
1117 rx_bi->dma = dma + (i * buf_size);
1118 rx_bi->hdr_buf = buffer + (i * buf_size);
1123 * i40e_setup_rx_descriptors - Allocate Rx descriptors
1124 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
1126 * Returns 0 on success, negative on failure
1128 int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
1130 struct device *dev = rx_ring->dev;
1133 /* warn if we are about to overwrite the pointer */
1134 WARN_ON(rx_ring->rx_bi);
1135 bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
1136 rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
1137 if (!rx_ring->rx_bi)
1140 u64_stats_init(&rx_ring->syncp);
1142 /* Round up to nearest 4K */
1143 rx_ring->size = ring_is_16byte_desc_enabled(rx_ring)
1144 ? rx_ring->count * sizeof(union i40e_16byte_rx_desc)
1145 : rx_ring->count * sizeof(union i40e_32byte_rx_desc);
1146 rx_ring->size = ALIGN(rx_ring->size, 4096);
1147 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
1148 &rx_ring->dma, GFP_KERNEL);
1150 if (!rx_ring->desc) {
1151 dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
1156 rx_ring->next_to_clean = 0;
1157 rx_ring->next_to_use = 0;
1161 kfree(rx_ring->rx_bi);
1162 rx_ring->rx_bi = NULL;
1167 * i40e_release_rx_desc - Store the new tail and head values
1168 * @rx_ring: ring to bump
1169 * @val: new head index
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;
	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}
1184 * i40e_alloc_rx_buffers_ps - Replace used receive buffers; packet split
1185 * @rx_ring: ring to place buffers on
1186 * @cleaned_count: number of buffers to replace
1188 void i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
1190 u16 i = rx_ring->next_to_use;
1191 union i40e_rx_desc *rx_desc;
1192 struct i40e_rx_buffer *bi;
1194 /* do nothing if no valid netdev defined */
1195 if (!rx_ring->netdev || !cleaned_count)
1198 while (cleaned_count--) {
1199 rx_desc = I40E_RX_DESC(rx_ring, i);
1200 bi = &rx_ring->rx_bi[i];
1202 if (bi->skb) /* desc is in use */
1205 bi->page = alloc_page(GFP_ATOMIC);
1207 rx_ring->rx_stats.alloc_page_failed++;
1212 if (!bi->page_dma) {
1213 /* use a half page if we're re-using */
1214 bi->page_offset ^= PAGE_SIZE / 2;
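			/* The XOR above alternates page_offset between 0 and
			 * PAGE_SIZE/2, so each page is handed to hardware one
			 * half at a time and can be reused for a second
			 * receive buffer before a new allocation is needed.
			 */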
1215 bi->page_dma = dma_map_page(rx_ring->dev,
1220 if (dma_mapping_error(rx_ring->dev,
1222 rx_ring->rx_stats.alloc_page_failed++;
1228 dma_sync_single_range_for_device(rx_ring->dev,
1229 rx_ring->rx_bi[0].dma,
1230 i * rx_ring->rx_hdr_len,
1231 rx_ring->rx_hdr_len,
1233 /* Refresh the desc even if buffer_addrs didn't change
1234 * because each write-back erases this info.
1236 rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
1237 rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
1239 if (i == rx_ring->count)
1244 if (rx_ring->next_to_use != i)
1245 i40e_release_rx_desc(rx_ring, i);
1249 * i40e_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer
1250 * @rx_ring: ring to place buffers on
1251 * @cleaned_count: number of buffers to replace
1253 void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
1255 u16 i = rx_ring->next_to_use;
1256 union i40e_rx_desc *rx_desc;
1257 struct i40e_rx_buffer *bi;
1258 struct sk_buff *skb;
1260 /* do nothing if no valid netdev defined */
1261 if (!rx_ring->netdev || !cleaned_count)
1264 while (cleaned_count--) {
1265 rx_desc = I40E_RX_DESC(rx_ring, i);
1266 bi = &rx_ring->rx_bi[i];
1270 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
1271 rx_ring->rx_buf_len);
1273 rx_ring->rx_stats.alloc_buff_failed++;
1276 /* initialize queue mapping */
1277 skb_record_rx_queue(skb, rx_ring->queue_index);
1282 bi->dma = dma_map_single(rx_ring->dev,
1284 rx_ring->rx_buf_len,
1286 if (dma_mapping_error(rx_ring->dev, bi->dma)) {
1287 rx_ring->rx_stats.alloc_buff_failed++;
1293 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
1294 rx_desc->read.hdr_addr = 0;
1296 if (i == rx_ring->count)
1301 if (rx_ring->next_to_use != i)
1302 i40e_release_rx_desc(rx_ring, i);
1306 * i40e_receive_skb - Send a completed packet up the stack
1307 * @rx_ring: rx ring in play
1308 * @skb: packet to send up
1309 * @vlan_tag: vlan tag for packet
1311 static void i40e_receive_skb(struct i40e_ring *rx_ring,
1312 struct sk_buff *skb, u16 vlan_tag)
1314 struct i40e_q_vector *q_vector = rx_ring->q_vector;
1316 if (vlan_tag & VLAN_VID_MASK)
1317 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
1319 napi_gro_receive(&q_vector->napi, skb);
1323 * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
1324 * @vsi: the VSI we care about
1325 * @skb: skb currently being received and modified
1326 * @rx_status: status value of last descriptor in packet
1327 * @rx_error: error value of last descriptor in packet
1328 * @rx_ptype: ptype value of last descriptor in packet
1330 static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
1331 struct sk_buff *skb,
1336 struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype);
1337 bool ipv4 = false, ipv6 = false;
1338 bool ipv4_tunnel, ipv6_tunnel;
1343 ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
1344 (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
1345 ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
1346 (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
1348 skb->ip_summed = CHECKSUM_NONE;
1350 /* Rx csum enabled and ip headers found? */
1351 if (!(vsi->netdev->features & NETIF_F_RXCSUM))
1354 /* did the hardware decode the packet and checksum? */
1355 if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
1358 /* both known and outer_ip must be set for the below code to work */
1359 if (!(decoded.known && decoded.outer_ip))
1362 if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1363 decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4)
1365 else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1366 decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6)
1370 (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
1371 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
1374 /* likely incorrect csum if alternate IP extension headers found */
1376 rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
1377 /* don't increment checksum err here, non-fatal err */
1380 /* there was some L4 error, count error and punt packet to the stack */
1381 if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
1384 /* handle packets that were not able to be checksummed due
1385 * to arrival speed, in this case the stack can compute
1388 if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
1391 /* If VXLAN/GENEVE traffic has an outer UDPv4 checksum we need to check
1392 * it in the driver, hardware does not do it for us.
1393 * Since L3L4P bit was set we assume a valid IHL value (>=5)
1394 * so the total length of IPv4 header is IHL*4 bytes
1395 * The UDP_0 bit *may* bet set if the *inner* header is UDP
1397 if (!(vsi->back->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE) &&
1399 skb->transport_header = skb->mac_header +
1400 sizeof(struct ethhdr) +
1401 (ip_hdr(skb)->ihl * 4);
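		/* ihl is the IPv4 header length in 32-bit words, so a header
		 * with no options (ihl == 5) adds 20 bytes here on top of the
		 * 14-byte Ethernet header.
		 */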
1403 /* Add 4 bytes for VLAN tagged packets */
1404 skb->transport_header += (skb->protocol == htons(ETH_P_8021Q) ||
1405 skb->protocol == htons(ETH_P_8021AD))
1408 if ((ip_hdr(skb)->protocol == IPPROTO_UDP) &&
1409 (udp_hdr(skb)->check != 0)) {
1410 rx_udp_csum = udp_csum(skb);
1412 csum = csum_tcpudp_magic(
1413 iph->saddr, iph->daddr,
1414 (skb->len - skb_transport_offset(skb)),
1415 IPPROTO_UDP, rx_udp_csum);
1417 if (udp_hdr(skb)->check != csum)
1420 } /* else its GRE and so no outer UDP header */
1423 skb->ip_summed = CHECKSUM_UNNECESSARY;
1424 skb->csum_level = ipv4_tunnel || ipv6_tunnel;
1429 vsi->back->hw_csum_rx_error++;
1433 * i40e_ptype_to_htype - get a hash type
1434 * @ptype: the ptype value from the descriptor
1436 * Returns a hash type to be used by skb_set_hash
1438 static inline enum pkt_hash_types i40e_ptype_to_htype(u8 ptype)
1440 struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
1443 return PKT_HASH_TYPE_NONE;
1445 if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1446 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
1447 return PKT_HASH_TYPE_L4;
1448 else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1449 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
1450 return PKT_HASH_TYPE_L3;
1452 return PKT_HASH_TYPE_L2;
1456 * i40e_rx_hash - set the hash value in the skb
1457 * @ring: descriptor ring
1458 * @rx_desc: specific descriptor
1460 static inline void i40e_rx_hash(struct i40e_ring *ring,
1461 union i40e_rx_desc *rx_desc,
1462 struct sk_buff *skb,
1466 const __le64 rss_mask =
1467 cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
1468 I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
1470 if (ring->netdev->features & NETIF_F_RXHASH)
1473 if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
1474 hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
1475 skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
1480 * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split
1481 * @rx_ring: rx ring to clean
1482 * @budget: how many cleans we're allowed
1484 * Returns true if there's any budget left (e.g. the clean is finished)
1486 static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
1488 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1489 u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
1490 u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
1491 const int current_node = numa_mem_id();
1492 struct i40e_vsi *vsi = rx_ring->vsi;
1493 u16 i = rx_ring->next_to_clean;
1494 union i40e_rx_desc *rx_desc;
1495 u32 rx_error, rx_status;
1503 struct i40e_rx_buffer *rx_bi;
1504 struct sk_buff *skb;
1506 /* return some buffers to hardware, one at a time is too slow */
1507 if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
1508 i40e_alloc_rx_buffers_ps(rx_ring, cleaned_count);
1512 i = rx_ring->next_to_clean;
1513 rx_desc = I40E_RX_DESC(rx_ring, i);
1514 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1515 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1516 I40E_RXD_QW1_STATUS_SHIFT;
1518 if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
1521 /* This memory barrier is needed to keep us from reading
1522 * any other fields out of the rx_desc until we know the
1526 if (i40e_rx_is_programming_status(qword)) {
1527 i40e_clean_programming_status(rx_ring, rx_desc);
1528 I40E_RX_INCREMENT(rx_ring, i);
1531 rx_bi = &rx_ring->rx_bi[i];
1534 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
1535 rx_ring->rx_hdr_len);
1537 rx_ring->rx_stats.alloc_buff_failed++;
1541 /* initialize queue mapping */
1542 skb_record_rx_queue(skb, rx_ring->queue_index);
1543 /* we are reusing so sync this buffer for CPU use */
1544 dma_sync_single_range_for_cpu(rx_ring->dev,
1545 rx_ring->rx_bi[0].dma,
1546 i * rx_ring->rx_hdr_len,
1547 rx_ring->rx_hdr_len,
1550 rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
1551 I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
1552 rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >>
1553 I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
1554 rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >>
1555 I40E_RXD_QW1_LENGTH_SPH_SHIFT;
1557 rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
1558 I40E_RXD_QW1_ERROR_SHIFT;
1559 rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
1560 rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
1562 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
1563 I40E_RXD_QW1_PTYPE_SHIFT;
1564 prefetch(rx_bi->page);
1567 if (rx_hbo || rx_sph) {
1571 len = I40E_RX_HDR_SIZE;
1573 len = rx_header_len;
1574 memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len);
1575 } else if (skb->len == 0) {
1578 len = (rx_packet_len > skb_headlen(skb) ?
1579 skb_headlen(skb) : rx_packet_len);
1580 memcpy(__skb_put(skb, len),
1581 rx_bi->page + rx_bi->page_offset,
1583 rx_bi->page_offset += len;
1584 rx_packet_len -= len;
1587 /* Get the rest of the data if this was a header split */
1588 if (rx_packet_len) {
1589 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1594 skb->len += rx_packet_len;
1595 skb->data_len += rx_packet_len;
1596 skb->truesize += rx_packet_len;
1598 if ((page_count(rx_bi->page) == 1) &&
1599 (page_to_nid(rx_bi->page) == current_node))
1600 get_page(rx_bi->page);
1604 dma_unmap_page(rx_ring->dev,
1608 rx_bi->page_dma = 0;
1610 I40E_RX_INCREMENT(rx_ring, i);
1613 !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
1614 struct i40e_rx_buffer *next_buffer;
1616 next_buffer = &rx_ring->rx_bi[i];
1617 next_buffer->skb = skb;
1618 rx_ring->rx_stats.non_eop_descs++;
1622 /* ERR_MASK will only have valid bits if EOP set */
1623 if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
1624 dev_kfree_skb_any(skb);
1628 i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
1630 if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
1631 i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
1632 I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
1633 I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
1634 rx_ring->last_rx_timestamp = jiffies;
1637 /* probably a little skewed due to removing CRC */
1638 total_rx_bytes += skb->len;
1641 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1643 i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
1645 vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
1646 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
1649 if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
1650 dev_kfree_skb_any(skb);
1654 i40e_receive_skb(rx_ring, skb, vlan_tag);
1656 rx_desc->wb.qword1.status_error_len = 0;
1658 } while (likely(total_rx_packets < budget));
1660 u64_stats_update_begin(&rx_ring->syncp);
1661 rx_ring->stats.packets += total_rx_packets;
1662 rx_ring->stats.bytes += total_rx_bytes;
1663 u64_stats_update_end(&rx_ring->syncp);
1664 rx_ring->q_vector->rx.total_packets += total_rx_packets;
1665 rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
1667 return total_rx_packets;
1671 * i40e_clean_rx_irq_1buf - Reclaim resources after receive; single buffer
1672 * @rx_ring: rx ring to clean
1673 * @budget: how many cleans we're allowed
1675 * Returns number of packets cleaned
1677 static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
1679 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1680 u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
1681 struct i40e_vsi *vsi = rx_ring->vsi;
1682 union i40e_rx_desc *rx_desc;
1683 u32 rx_error, rx_status;
1690 struct i40e_rx_buffer *rx_bi;
1691 struct sk_buff *skb;
1693 /* return some buffers to hardware, one at a time is too slow */
1694 if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
1695 i40e_alloc_rx_buffers_1buf(rx_ring, cleaned_count);
1699 i = rx_ring->next_to_clean;
1700 rx_desc = I40E_RX_DESC(rx_ring, i);
1701 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1702 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1703 I40E_RXD_QW1_STATUS_SHIFT;
1705 if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
1708 /* This memory barrier is needed to keep us from reading
1709 * any other fields out of the rx_desc until we know the
1714 if (i40e_rx_is_programming_status(qword)) {
1715 i40e_clean_programming_status(rx_ring, rx_desc);
1716 I40E_RX_INCREMENT(rx_ring, i);
1719 rx_bi = &rx_ring->rx_bi[i];
1721 prefetch(skb->data);
1723 rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
1724 I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
1726 rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
1727 I40E_RXD_QW1_ERROR_SHIFT;
1728 rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
1730 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
1731 I40E_RXD_QW1_PTYPE_SHIFT;
1735 /* Get the header and possibly the whole packet
1736 * If this is an skb from previous receive dma will be 0
1738 skb_put(skb, rx_packet_len);
1739 dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len,
1743 I40E_RX_INCREMENT(rx_ring, i);
1746 !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
1747 rx_ring->rx_stats.non_eop_descs++;
1751 /* ERR_MASK will only have valid bits if EOP set */
1752 if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
1753 dev_kfree_skb_any(skb);
1757 i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
1758 if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
1759 i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
1760 I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
1761 I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
1762 rx_ring->last_rx_timestamp = jiffies;
1765 /* probably a little skewed due to removing CRC */
1766 total_rx_bytes += skb->len;
1769 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1771 i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
1773 vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
1774 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
1777 if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
1778 dev_kfree_skb_any(skb);
1782 i40e_receive_skb(rx_ring, skb, vlan_tag);
1784 rx_desc->wb.qword1.status_error_len = 0;
1785 } while (likely(total_rx_packets < budget));
1787 u64_stats_update_begin(&rx_ring->syncp);
1788 rx_ring->stats.packets += total_rx_packets;
1789 rx_ring->stats.bytes += total_rx_bytes;
1790 u64_stats_update_end(&rx_ring->syncp);
1791 rx_ring->q_vector->rx.total_packets += total_rx_packets;
1792 rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
1794 return total_rx_packets;
static u32 i40e_buildreg_itr(const int type, const u16 itr)
{
	u32 val;

	val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
	      I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
	      (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
	      (itr << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);

	return val;
}
1809 /* a small macro to shorten up some long lines */
1810 #define INTREG I40E_PFINT_DYN_CTLN
1813 * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
1814 * @vsi: the VSI we care about
1815 * @q_vector: q_vector for which itr is being updated and interrupt enabled
1818 static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
1819 struct i40e_q_vector *q_vector)
1821 struct i40e_hw *hw = &vsi->back->hw;
1822 bool rx = false, tx = false;
1826 vector = (q_vector->v_idx + vsi->base_vector);
1828 /* avoid dynamic calculation if in countdown mode OR if
1829 * all dynamic is disabled
1831 rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
1833 if (q_vector->itr_countdown > 0 ||
1834 (!ITR_IS_DYNAMIC(vsi->rx_itr_setting) &&
1835 !ITR_IS_DYNAMIC(vsi->tx_itr_setting))) {
1839 if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) {
1840 rx = i40e_set_new_dynamic_itr(&q_vector->rx);
1841 rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
1844 if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) {
1845 tx = i40e_set_new_dynamic_itr(&q_vector->tx);
1846 txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr);
1850 /* get the higher of the two ITR adjustments and
1851 * use the same value for both ITR registers
1852 * when in adaptive mode (Rx and/or Tx)
1854 u16 itr = max(q_vector->tx.itr, q_vector->rx.itr);
1856 q_vector->tx.itr = q_vector->rx.itr = itr;
1857 txval = i40e_buildreg_itr(I40E_TX_ITR, itr);
1859 rxval = i40e_buildreg_itr(I40E_RX_ITR, itr);
1863 /* only need to enable the interrupt once, but need
1864 * to possibly update both ITR values
1867 /* set the INTENA_MSK_MASK so that this first write
1868 * won't actually enable the interrupt, instead just
1869 * updating the ITR (it's bit 31 PF and VF)
1872 /* don't check _DOWN because interrupt isn't being enabled */
1873 wr32(hw, INTREG(vector - 1), rxval);
1877 if (!test_bit(__I40E_DOWN, &vsi->state))
1878 wr32(hw, INTREG(vector - 1), txval);
1880 if (q_vector->itr_countdown)
1881 q_vector->itr_countdown--;
1883 q_vector->itr_countdown = ITR_COUNTDOWN_START;
1887 * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
1888 * @napi: napi struct with our devices info in it
1889 * @budget: amount of work driver is allowed to do this pass, in packets
1891 * This function will clean all queues associated with a q_vector.
1893 * Returns the amount of work done
1895 int i40e_napi_poll(struct napi_struct *napi, int budget)
1897 struct i40e_q_vector *q_vector =
1898 container_of(napi, struct i40e_q_vector, napi);
1899 struct i40e_vsi *vsi = q_vector->vsi;
1900 struct i40e_ring *ring;
1901 bool clean_complete = true;
1902 bool arm_wb = false;
1903 int budget_per_ring;
1906 if (test_bit(__I40E_DOWN, &vsi->state)) {
1907 napi_complete(napi);
1911 /* Clear hung_detected bit */
1912 clear_bit(I40E_Q_VECTOR_HUNG_DETECT, &q_vector->hung_detected);
1913 /* Since the actual Tx work is minimal, we can give the Tx a larger
1914 * budget and be more aggressive about cleaning up the Tx descriptors.
1916 i40e_for_each_ring(ring, q_vector->tx) {
1917 clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
1918 arm_wb = arm_wb || ring->arm_wb;
1919 ring->arm_wb = false;
1922 /* Handle case where we are called by netpoll with a budget of 0 */
1926 /* We attempt to distribute budget to each Rx queue fairly, but don't
1927 * allow the budget to go below 1 because that would exit polling early.
1929 budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
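	/* For example, a NAPI budget of 64 across 4 ring pairs gives each Rx
	 * ring a budget of 16; with very many rings the max() keeps the
	 * per-ring budget at 1 so polling still makes progress.
	 */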
1931 i40e_for_each_ring(ring, q_vector->rx) {
1934 if (ring_is_ps_enabled(ring))
1935 cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring);
1937 cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring);
1939 work_done += cleaned;
1940 /* if we didn't clean as many as budgeted, we must be done */
1941 clean_complete &= (budget_per_ring != cleaned);
1944 /* If work not completed, return budget and polling will return */
1945 if (!clean_complete) {
1948 q_vector->tx.ring[0].tx_stats.tx_force_wb++;
1949 i40e_force_wb(vsi, q_vector);
1954 if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
1955 q_vector->arm_wb_state = false;
1957 /* Work is done so exit the polling mode and re-enable the interrupt */
1958 napi_complete_done(napi, work_done);
1959 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
1960 i40e_update_enable_itr(vsi, q_vector);
1961 } else { /* Legacy mode */
1962 struct i40e_hw *hw = &vsi->back->hw;
1963 /* We re-enable the queue 0 cause, but
1964 * don't worry about dynamic_enable
1965 * because we left it on for the other
1966 * possible interrupts during napi
1968 u32 qval = rd32(hw, I40E_QINT_RQCTL(0)) |
1969 I40E_QINT_RQCTL_CAUSE_ENA_MASK;
1971 wr32(hw, I40E_QINT_RQCTL(0), qval);
1972 qval = rd32(hw, I40E_QINT_TQCTL(0)) |
1973 I40E_QINT_TQCTL_CAUSE_ENA_MASK;
1974 wr32(hw, I40E_QINT_TQCTL(0), qval);
1975 i40e_irq_dynamic_enable_icr0(vsi->back);
1981 * i40e_atr - Add a Flow Director ATR filter
1982 * @tx_ring: ring to add programming descriptor to
1984 * @tx_flags: send tx flags
1985 * @protocol: wire protocol
1987 static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
1988 u32 tx_flags, __be16 protocol)
1990 struct i40e_filter_program_desc *fdir_desc;
1991 struct i40e_pf *pf = tx_ring->vsi->back;
1993 unsigned char *network;
1995 struct ipv6hdr *ipv6;
1999 u32 flex_ptype, dtype_cmd;
2002 /* make sure ATR is enabled */
2003 if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
2006 if ((pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
2009 /* if sampling is disabled do nothing */
2010 if (!tx_ring->atr_sample_rate)
2013 if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
2016 if (!(tx_flags & I40E_TX_FLAGS_UDP_TUNNEL)) {
2017 /* snag network header to get L4 type and address */
2018 hdr.network = skb_network_header(skb);
2020 /* Currently only IPv4/IPv6 with TCP is supported
2021 * access ihl as u8 to avoid unaligned access on ia64
2023 if (tx_flags & I40E_TX_FLAGS_IPV4)
2024 hlen = (hdr.network[0] & 0x0F) << 2;
2025 else if (protocol == htons(ETH_P_IPV6))
2026 hlen = sizeof(struct ipv6hdr);
2030 hdr.network = skb_inner_network_header(skb);
2031 hlen = skb_inner_network_header_len(skb);
2034 /* Currently only IPv4/IPv6 with TCP is supported
2035 * Note: tx_flags gets modified to reflect inner protocols in
2036 * tx_enable_csum function if encap is enabled.
2038 if ((tx_flags & I40E_TX_FLAGS_IPV4) &&
2039 (hdr.ipv4->protocol != IPPROTO_TCP))
2041 else if ((tx_flags & I40E_TX_FLAGS_IPV6) &&
2042 (hdr.ipv6->nexthdr != IPPROTO_TCP))
2045 th = (struct tcphdr *)(hdr.network + hlen);
2047 /* Due to lack of space, no more new filters can be programmed */
2048 if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
2050 if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) {
2051 /* HW ATR eviction will take care of removing filters on FIN
2054 if (th->fin || th->rst)
2058 tx_ring->atr_count++;
2060 /* sample on all syn/fin/rst packets or once every atr sample rate */
2064 (tx_ring->atr_count < tx_ring->atr_sample_rate))
2067 tx_ring->atr_count = 0;
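	/* Reaching this point means a sample is taken: roughly one ATR
	 * filter is programmed (or, on FIN/RST, removed) per
	 * atr_sample_rate packets of this flow, plus the SYN/FIN/RST cases
	 * handled above, so receives of the flow can be steered to the
	 * queue used for transmit.
	 */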
	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2076 flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
2077 I40E_TXD_FLTR_QW0_QINDEX_MASK;
2078 flex_ptype |= (protocol == htons(ETH_P_IP)) ?
2079 (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
2080 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
2081 (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
2082 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
2084 flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
2086 dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
2088 dtype_cmd |= (th->fin || th->rst) ?
2089 (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
2090 I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
2091 (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
2092 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
2094 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
2095 I40E_TXD_FLTR_QW1_DEST_SHIFT;
2097 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
2098 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
2100 dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
2101 if (!(tx_flags & I40E_TX_FLAGS_UDP_TUNNEL))
2103 ((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
2104 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2105 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
2108 ((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
2109 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2110 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
2112 if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)
2113 dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
2115 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
2116 fdir_desc->rsvd = cpu_to_le32(0);
2117 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
2118 fdir_desc->fd_id = cpu_to_le32(0);
/**
 * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
 * @skb:     send buffer
 * @tx_ring: ring to send buffer on
 * @flags:   the tx flags to be set
 *
 * Checks the skb and sets up correspondingly several generic transmit flags
 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
 *
 * Returns an error code to indicate the frame should be dropped upon error,
 * otherwise returns 0 to indicate the flags have been set properly.
 **/
#ifdef I40E_FCOE
inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
				      struct i40e_ring *tx_ring,
				      u32 *flags)
#else
static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
					     struct i40e_ring *tx_ring,
					     u32 *flags)
#endif
{
	__be16 protocol = skb->protocol;
	u32 tx_flags = 0;

	if (protocol == htons(ETH_P_8021Q) &&
	    !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
		/* When HW VLAN acceleration is turned off by the user the
		 * stack sets the protocol to 8021q so that the driver
		 * can take any steps required to support the SW only
		 * VLAN handling.  In our case the driver doesn't need
		 * to take any further steps so just set the protocol
		 * to the encapsulated ethertype.
		 */
		skb->protocol = vlan_get_protocol(skb);
		goto out;
	}

	/* if we have a HW VLAN tag being added, default to the HW one */
	if (skb_vlan_tag_present(skb)) {
		tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= I40E_TX_FLAGS_HW_VLAN;
	/* else if it is a SW VLAN, check the next protocol and store the tag */
	} else if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_hdr *vhdr, _vhdr;

		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
		if (!vhdr)
			return -EINVAL;

		protocol = vhdr->h_vlan_encapsulated_proto;
		tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= I40E_TX_FLAGS_SW_VLAN;
	}

	if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED))
		goto out;

	/* Insert 802.1p priority into VLAN header */
	if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
	    (skb->priority != TC_PRIO_CONTROL)) {
		tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
		tx_flags |= (skb->priority & 0x7) <<
				I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
		if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
			struct vlan_ethhdr *vhdr;
			int rc;

			rc = skb_cow_head(skb, 0);
			if (rc < 0)
				return rc;
			vhdr = (struct vlan_ethhdr *)skb->data;
			vhdr->h_vlan_TCI = htons(tx_flags >>
						 I40E_TX_FLAGS_VLAN_SHIFT);
		} else {
			tx_flags |= I40E_TX_FLAGS_HW_VLAN;
		}
	}

out:
	*flags = tx_flags;
	return 0;
}
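
/* Illustrative tx_flags layout, assuming a HW-offloaded tag for VLAN id 100
 * that DCB remaps to skb->priority 5: the 802.1Q TCI stored above the
 * I40E_TX_FLAGS_VLAN_SHIFT boundary becomes (5 << 13) | 100 = 0xa064, and
 * I40E_TX_FLAGS_HW_VLAN tells i40e_tx_map() to place it in L2TAG1 of the
 * data descriptor rather than rewriting the packet.
 */
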
/**
 * i40e_tso - set up the tso context descriptor
 * @tx_ring:  ptr to the ring to send
 * @skb:      ptr to the skb we're sending
 * @hdr_len:  ptr to the size of the packet header
 * @cd_type_cmd_tso_mss: Quad Word 1
 *
 * Returns 0 if no TSO can happen, 1 if tso is going, or error
 **/
static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
		    u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
{
	u32 cd_cmd, cd_tso_len, cd_mss;
	struct ipv6hdr *ipv6h;
	struct tcphdr *tcph;
	struct iphdr *iph;
	u32 l4len;
	int err;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
	ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);

	if (iph->version == 4) {
		tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
						 0, IPPROTO_TCP, 0);
	} else if (ipv6h->version == 6) {
		tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
		ipv6h->payload_len = 0;
		tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
					       0, IPPROTO_TCP, 0);
	}

	l4len = skb->encapsulation ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb);
	*hdr_len = (skb->encapsulation
		    ? (skb_inner_transport_header(skb) - skb->data)
		    : skb_transport_offset(skb)) + l4len;

	/* find the field values */
	cd_cmd = I40E_TX_CTX_DESC_TSO;
	cd_tso_len = skb->len - *hdr_len;
	cd_mss = skb_shinfo(skb)->gso_size;
	*cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
				((u64)cd_tso_len <<
				 I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
				((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
	return 1;
}
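
/* Worked example (illustrative): a 57974 byte IPv4/TCP GSO skb with a
 * 54 byte header (14 MAC + 20 IP + 20 TCP) and gso_size 1448 yields
 * *hdr_len = 54, cd_tso_len = 57974 - 54 = 57920 and cd_mss = 1448, so the
 * hardware emits 40 wire segments, each carrying a rebuilt 54 byte header.
 */
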
/**
 * i40e_tsyn - set up the tsyn context descriptor
 * @tx_ring:  ptr to the ring to send
 * @skb:      ptr to the skb we're sending
 * @tx_flags: the collected send information
 * @cd_type_cmd_tso_mss: Quad Word 1
 *
 * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
 **/
static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
		     u32 tx_flags, u64 *cd_type_cmd_tso_mss)
{
	struct i40e_pf *pf;

	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
		return 0;

	/* Tx timestamps cannot be sampled when doing TSO */
	if (tx_flags & I40E_TX_FLAGS_TSO)
		return 0;

	/* only timestamp the outbound packet if the user has requested it and
	 * we are not already transmitting a packet to be timestamped
	 */
	pf = i40e_netdev_to_pf(tx_ring->netdev);
	if (!(pf->flags & I40E_FLAG_PTP))
		return 0;

	if (pf->ptp_tx &&
	    !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, &pf->state)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		pf->ptp_tx_skb = skb_get(skb);
	} else {
		return 0;
	}

	*cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
				I40E_TXD_CTX_QW1_CMD_SHIFT;
	return 1;
}

/**
 * i40e_tx_enable_csum - Enable Tx checksum offloads
 * @skb: send buffer
 * @tx_flags: pointer to Tx flags currently set
 * @td_cmd: Tx descriptor command bits to set
 * @td_offset: Tx descriptor header offsets to set
 * @tx_ring: Tx descriptor ring
 * @cd_tunneling: ptr to context desc bits
 **/
static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
				u32 *td_cmd, u32 *td_offset,
				struct i40e_ring *tx_ring,
				u32 *cd_tunneling)
{
	struct ipv6hdr *this_ipv6_hdr;
	unsigned int this_tcp_hdrlen;
	struct iphdr *this_ip_hdr;
	u32 network_hdr_len;
	u8 l4_hdr = 0;
	struct udphdr *oudph = NULL;
	struct iphdr *oiph = NULL;
	u32 l4_tunnel = 0;

	if (skb->encapsulation) {
		switch (ip_hdr(skb)->protocol) {
		case IPPROTO_UDP:
			oudph = udp_hdr(skb);
			oiph = ip_hdr(skb);
			l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
			*tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
			break;
		case IPPROTO_GRE:
			l4_tunnel = I40E_TXD_CTX_GRE_TUNNELING;
			break;
		default:
			return;
		}
		network_hdr_len = skb_inner_network_header_len(skb);
		this_ip_hdr = inner_ip_hdr(skb);
		this_ipv6_hdr = inner_ipv6_hdr(skb);
		this_tcp_hdrlen = inner_tcp_hdrlen(skb);

		if (*tx_flags & I40E_TX_FLAGS_IPV4) {
			if (*tx_flags & I40E_TX_FLAGS_TSO) {
				*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
				ip_hdr(skb)->check = 0;
			} else {
				*cd_tunneling |=
					I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
			}
		} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
			*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
			if (*tx_flags & I40E_TX_FLAGS_TSO)
				ip_hdr(skb)->check = 0;
		}

		/* Now set the ctx descriptor fields */
		*cd_tunneling |= (skb_network_header_len(skb) >> 2) <<
				   I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT |
				   l4_tunnel |
				   ((skb_inner_network_offset(skb) -
				     skb_transport_offset(skb)) >> 1) <<
				   I40E_TXD_CTX_QW0_NATLEN_SHIFT;
		if (this_ip_hdr->version == 6) {
			*tx_flags &= ~I40E_TX_FLAGS_IPV4;
			*tx_flags |= I40E_TX_FLAGS_IPV6;
		}
		if ((tx_ring->flags & I40E_TXR_FLAGS_OUTER_UDP_CSUM) &&
		    (l4_tunnel == I40E_TXD_CTX_UDP_TUNNELING) &&
		    (*cd_tunneling & I40E_TXD_CTX_QW0_EXT_IP_MASK)) {
			oudph->check = ~csum_tcpudp_magic(oiph->saddr,
					oiph->daddr,
					(skb->len - skb_transport_offset(skb)),
					IPPROTO_UDP, 0);
			*cd_tunneling |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
		}
	} else {
		network_hdr_len = skb_network_header_len(skb);
		this_ip_hdr = ip_hdr(skb);
		this_ipv6_hdr = ipv6_hdr(skb);
		this_tcp_hdrlen = tcp_hdrlen(skb);
	}

	/* Enable IP checksum offloads */
	if (*tx_flags & I40E_TX_FLAGS_IPV4) {
		l4_hdr = this_ip_hdr->protocol;
		/* the stack computes the IP header already, the only time we
		 * need the hardware to recompute it is in the case of TSO.
		 */
		if (*tx_flags & I40E_TX_FLAGS_TSO) {
			*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
			this_ip_hdr->check = 0;
		} else {
			*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
		}
		/* Now set the td_offset for IP header length */
		*td_offset = (network_hdr_len >> 2) <<
			      I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
	} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
		l4_hdr = this_ipv6_hdr->nexthdr;
		*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
		/* Now set the td_offset for IP header length */
		*td_offset = (network_hdr_len >> 2) <<
			      I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
	}
	/* words in MACLEN + dwords in IPLEN + dwords in L4Len */
	*td_offset |= (skb_network_offset(skb) >> 1) <<
		       I40E_TX_DESC_LENGTH_MACLEN_SHIFT;

	/* Enable L4 checksum offloads */
	switch (l4_hdr) {
	case IPPROTO_TCP:
		/* enable checksum offloads */
		*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
		*td_offset |= (this_tcp_hdrlen >> 2) <<
			       I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	case IPPROTO_SCTP:
		/* enable SCTP checksum offload */
		*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
		*td_offset |= (sizeof(struct sctphdr) >> 2) <<
			       I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	case IPPROTO_UDP:
		/* enable UDP checksum offload */
		*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
		*td_offset |= (sizeof(struct udphdr) >> 2) <<
			       I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	default:
		break;
	}
}
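
/* Worked example (illustrative), plain IPv4/TCP with no tunnel: a 14 byte
 * MAC header gives MACLEN = 14 >> 1 = 7 words, a 20 byte IPv4 header gives
 * IPLEN = 20 >> 2 = 5 dwords, and a 20 byte TCP header gives L4LEN =
 * 20 >> 2 = 5 dwords, each shifted into its own field of td_offset.
 */
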
/**
 * i40e_create_tx_ctx - Build the Tx context descriptor
 * @tx_ring:  ring to create the descriptor on
 * @cd_type_cmd_tso_mss: Quad Word 1
 * @cd_tunneling: Quad Word 0 - bits 0-31
 * @cd_l2tag2: Quad Word 0 - bits 32-63
 **/
static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
			       const u64 cd_type_cmd_tso_mss,
			       const u32 cd_tunneling, const u32 cd_l2tag2)
{
	struct i40e_tx_context_desc *context_desc;
	int i = tx_ring->next_to_use;

	if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
	    !cd_tunneling && !cd_l2tag2)
		return;

	/* grab the next descriptor */
	context_desc = I40E_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* cpu_to_le32 and assign to struct fields */
	context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
	context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
	context_desc->rsvd = cpu_to_le16(0);
	context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
}

/**
 * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
 * @tx_ring: the ring to be checked
 * @size:    the size of the buffer we want to assure is available
 *
 * Returns -EBUSY if a stop is needed, else 0
 **/
static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
	/* Memory barrier before checking head and tail */
	smp_mb();

	/* Check again in case another CPU has just made room available. */
	if (likely(I40E_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;
	return 0;
}
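
/* Note on the stop/recheck pattern above: the queue is stopped first and
 * smp_mb() orders that store against the free-descriptor recheck, so a
 * concurrent Tx clean-up either observes the stopped queue and wakes it,
 * or this path sees the space it freed and restarts the queue itself;
 * the queue therefore cannot remain stopped while room is available.
 */
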
/**
 * i40e_maybe_stop_tx - 1st level check for tx stop conditions
 * @tx_ring: the ring to be checked
 * @size:    the size of the buffer we want to assure is available
 * Returns 0 if stop is not needed
 **/
#ifdef I40E_FCOE
inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
#else
static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
#endif
{
	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __i40e_maybe_stop_tx(tx_ring, size);
}

/**
 * i40e_chk_linearize - Check if there are more than 8 fragments per packet
 * @skb:      send buffer
 * @tx_flags: collected send information
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire and so we need to figure out the cases where we
 * need to linearize the skb.
 **/
static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags)
{
	struct skb_frag_struct *frag;
	bool linearize = false;
	unsigned int size = 0;
	u16 num_frags;
	u16 gso_segs;

	num_frags = skb_shinfo(skb)->nr_frags;
	gso_segs = skb_shinfo(skb)->gso_segs;

	if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
		u16 j = 0;
		if (num_frags < (I40E_MAX_BUFFER_TXD))
			goto linearize_chk_done;
		/* try the simple math, if we have too many frags per segment */
		if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) >
		    I40E_MAX_BUFFER_TXD) {
			linearize = true;
			goto linearize_chk_done;
		}
		frag = &skb_shinfo(skb)->frags[0];
		/* we might still have more fragments per segment */
		do {
			size += skb_frag_size(frag);
			frag++; j++;
			if ((size >= skb_shinfo(skb)->gso_size) &&
			    (j < I40E_MAX_BUFFER_TXD)) {
				size = (size % skb_shinfo(skb)->gso_size);
				j = (size) ? 1 : 0;
			}
			if (j == I40E_MAX_BUFFER_TXD) {
				linearize = true;
				break;
			}
			num_frags--;
		} while (num_frags);
	} else {
		if (num_frags >= I40E_MAX_BUFFER_TXD)
			linearize = true;
	}
linearize_chk_done:
	return linearize;
}
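
/* Illustrative example, assuming the 8 buffer hardware limit noted above:
 * with gso_size 1448, a TSO skb whose fragments are each only a few hundred
 * bytes can hit I40E_MAX_BUFFER_TXD descriptors before a full MSS worth of
 * payload has accumulated, so the loop flags it for linearization, while
 * typical page-sized fragments reset the running count and pass the check.
 */
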

/**
 * i40e_tx_map - Build the Tx descriptor
 * @tx_ring:  ring to send buffer on
 * @skb:      send buffer
 * @first:    first buffer info buffer to use
 * @tx_flags: collected send information
 * @hdr_len:  size of the packet header
 * @td_cmd:   the command field in the descriptor
 * @td_offset: offset for checksum or crc
 **/
#ifdef I40E_FCOE
inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
			struct i40e_tx_buffer *first, u32 tx_flags,
			const u8 hdr_len, u32 td_cmd, u32 td_offset)
#else
static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
			       struct i40e_tx_buffer *first, u32 tx_flags,
			       const u8 hdr_len, u32 td_cmd, u32 td_offset)
#endif
{
	unsigned int data_len = skb->data_len;
	unsigned int size = skb_headlen(skb);
	struct skb_frag_struct *frag;
	struct i40e_tx_buffer *tx_bi;
	struct i40e_tx_desc *tx_desc;
	u16 i = tx_ring->next_to_use;
	u32 td_tag = 0;
	dma_addr_t dma;
	u16 gso_segs;
	u16 desc_count = 0;
	bool tail_bump = true;
	bool do_rs = false;

	if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
		td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
		td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
			 I40E_TX_FLAGS_VLAN_SHIFT;
	}

	if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
		gso_segs = skb_shinfo(skb)->gso_segs;
	else
		gso_segs = 1;

	/* multiply data chunks by size of headers */
	first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len);
	first->gso_segs = gso_segs;
	first->skb = skb;
	first->tx_flags = tx_flags;

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_desc = I40E_TX_DESC(tx_ring, i);
	tx_bi = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_bi, len, size);
		dma_unmap_addr_set(tx_bi, dma, dma);

		tx_desc->buffer_addr = cpu_to_le64(dma);

		while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
			tx_desc->cmd_type_offset_bsz =
				build_ctob(td_cmd, td_offset,
					   I40E_MAX_DATA_PER_TXD, td_tag);

			tx_desc++;
			i++;
			desc_count++;

			if (i == tx_ring->count) {
				tx_desc = I40E_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += I40E_MAX_DATA_PER_TXD;
			size -= I40E_MAX_DATA_PER_TXD;

			tx_desc->buffer_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
							  size, td_tag);

		tx_desc++;
		i++;
		desc_count++;

		if (i == tx_ring->count) {
			tx_desc = I40E_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);

		tx_bi = &tx_ring->tx_bi[i];
	}

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
						 tx_ring->queue_index),
						 first->bytecount);
	i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);

	/* Algorithm to optimize tail and RS bit setting:
	 * if xmit_more is supported
	 *	if xmit_more is true
	 *		do not update tail and do not mark RS bit.
	 *	if xmit_more is false and last xmit_more was false
	 *		if every packet spanned less than 4 desc
	 *			then set RS bit on 4th packet and update tail
	 *			on every packet
	 *		else
	 *			update tail and set RS bit on every packet.
	 *	if xmit_more is false and last_xmit_more was true
	 *		update tail and set RS bit.
	 *
	 * Optimization: wmb to be issued only in case of tail update.
	 * Also optimize the Descriptor WB path for RS bit with the same
	 * algorithm.
	 *
	 * Note: If there are less than 4 packets
	 * pending and interrupts were disabled the service task will
	 * trigger a force WB.
	 */
	if (skb->xmit_more &&
	    !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
						    tx_ring->queue_index))) {
		tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
		tail_bump = false;
	} else if (!skb->xmit_more &&
		   !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
						       tx_ring->queue_index)) &&
		   (!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) &&
		   (tx_ring->packet_stride < WB_STRIDE) &&
		   (desc_count < WB_STRIDE)) {
		tx_ring->packet_stride++;
	} else {
		tx_ring->packet_stride = 0;
		tx_ring->flags &= ~I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
		do_rs = true;
	}
	if (do_rs)
		tx_ring->packet_stride = 0;

	tx_desc->cmd_type_offset_bsz =
			build_ctob(td_cmd, td_offset, size, td_tag) |
			cpu_to_le64((u64)(do_rs ? I40E_TXD_CMD :
						  I40E_TX_DESC_CMD_EOP) <<
						  I40E_TXD_QW1_CMD_SHIFT);

	/* notify HW of packet */
	if (!tail_bump)
		prefetchw(tx_desc + 1);

	if (tail_bump) {
		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, tx_ring->tail);
	}

	return;

dma_error:
	dev_info(tx_ring->dev, "TX DMA map failed\n");

	/* clear dma mappings for failed tx_bi map */
	for (;;) {
		tx_bi = &tx_ring->tx_bi[i];
		i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
		if (tx_bi == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}
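
/* Mapping sketch (illustrative): a linear region larger than
 * I40E_MAX_DATA_PER_TXD is carved by the while loop above into several data
 * descriptors that share one DMA mapping, advancing dma and shrinking size
 * by I40E_MAX_DATA_PER_TXD per pass; only the final descriptor of the
 * packet carries EOP (and, depending on do_rs, the RS writeback request).
 */
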
/**
 * i40e_xmit_descriptor_count - calculate number of tx descriptors needed
 * @skb:     send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns number of data descriptors needed for this skb. Returns 0 to
 * indicate there are not enough descriptors available in this ring since
 * we need at least one descriptor.
 **/
#ifdef I40E_FCOE
inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
				      struct i40e_ring *tx_ring)
#else
static inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
					     struct i40e_ring *tx_ring)
#endif
{
	unsigned int f;
	int count = 0;

	/* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
	 *       + 4 desc gap to avoid the cache line where head is,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);

	count += TXD_USE_COUNT(skb_headlen(skb));
	if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
		tx_ring->tx_stats.tx_busy++;
		return 0;
	}
	return count;
}
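
/* Budget example (illustrative): a packet with a 512 byte linear head and
 * two page fragments typically costs one descriptor each, so count = 3 and
 * i40e_maybe_stop_tx() is asked for 3 + 4 + 1 = 8 free entries, covering a
 * possible context descriptor plus the gap kept next to the head pointer.
 */
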

/**
 * i40e_xmit_frame_ring - Sends buffer on Tx ring
 * @skb:     send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/
static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
					struct i40e_ring *tx_ring)
{
	u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
	u32 cd_tunneling = 0, cd_l2tag2 = 0;
	struct i40e_tx_buffer *first;
	u32 td_offset = 0;
	u32 tx_flags = 0;
	__be16 protocol;
	u32 td_cmd = 0;
	u8 hdr_len = 0;
	int tsyn;
	int tso;

	/* prefetch the data, we'll need it later */
	prefetch(skb->data);

	if (0 == i40e_xmit_descriptor_count(skb, tx_ring))
		return NETDEV_TX_BUSY;

	/* prepare the xmit flags */
	if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
		goto out_drop;

	/* obtain protocol of skb */
	protocol = vlan_get_protocol(skb);

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_bi[tx_ring->next_to_use];

	/* setup IPv4/IPv6 offloads */
	if (protocol == htons(ETH_P_IP))
		tx_flags |= I40E_TX_FLAGS_IPV4;
	else if (protocol == htons(ETH_P_IPV6))
		tx_flags |= I40E_TX_FLAGS_IPV6;

	tso = i40e_tso(tx_ring, skb, &hdr_len, &cd_type_cmd_tso_mss);

	if (tso < 0)
		goto out_drop;
	else if (tso)
		tx_flags |= I40E_TX_FLAGS_TSO;

	tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);

	if (tsyn)
		tx_flags |= I40E_TX_FLAGS_TSYN;

	if (i40e_chk_linearize(skb, tx_flags)) {
		if (skb_linearize(skb))
			goto out_drop;
		tx_ring->tx_stats.tx_linearize++;
	}
	skb_tx_timestamp(skb);

	/* always enable CRC insertion offload */
	td_cmd |= I40E_TX_DESC_CMD_ICRC;

	/* Always offload the checksum, since it's in the data descriptor */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		tx_flags |= I40E_TX_FLAGS_CSUM;

		i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
				    tx_ring, &cd_tunneling);
	}

	i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
			   cd_tunneling, cd_l2tag2);

	/* Add Flow Director ATR if it's enabled.
	 *
	 * NOTE: this must always be directly before the data descriptor.
	 */
	i40e_atr(tx_ring, skb, tx_flags, protocol);

	i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
		    td_cmd, td_offset);

	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
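
/* Transmit path ordering recap: VLAN/priority flags are prepared first,
 * then the TSO and timestamp context bits, optional linearization,
 * checksum offload setup, the context descriptor, the ATR filter
 * programming descriptor (which must immediately precede the data
 * descriptors), and finally i40e_tx_map() posts the frame to the ring.
 */
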
/**
 * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
 * @skb:    send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];

	/* hardware can't handle really short frames, hardware padding works
	 * beyond this point
	 */
	if (skb_put_padto(skb, I40E_MIN_TX_LEN))
		return NETDEV_TX_OK;

	return i40e_xmit_frame_ring(skb, tx_ring);
}
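
/* i40e_lan_xmit_frame serves as the LAN VSI ndo_start_xmit entry point:
 * skb->queue_mapping selects the Tx ring, and frames shorter than
 * I40E_MIN_TX_LEN are padded (or dropped if padding fails, in which case
 * skb_put_padto() already freed the skb) before i40e_xmit_frame_ring()
 * builds and posts the descriptors.
 */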