/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/
#include <linux/prefetch.h>
#include <net/busy_poll.h>
#include "i40e.h"
#include "i40e_prototype.h"
static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
				u32 td_tag)
{
	return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
			   ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
			   ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
			   ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
			   ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
}
#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
#define I40E_FD_CLEAN_DELAY 10
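
/* Illustrative sketch (editor's example, not part of the upstream driver):
 * build_ctob() above packs the descriptor type, command bits, checksum
 * offsets, buffer size and L2 tag into the one little-endian quadword the
 * hardware consumes.  The hypothetical helper below composes that quadword
 * for a plain EOP+RS data descriptor of a 256 byte buffer with no offloads:
 */
static inline __le64 i40e_example_data_qword(void)
{
	/* EOP+RS command, zero header offsets, 256 byte buffer, no VLAN tag */
	return build_ctob(I40E_TXD_CMD, 0, 256, 0);
}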
/**
 * i40e_program_fdir_filter - Program a Flow Director filter
 * @fdir_data: Packet data that will be filter parameters
 * @raw_packet: the pre-allocated packet buffer for FDir
 * @pf: The PF pointer
 * @add: True for add/update, False for remove
 **/
int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
			     struct i40e_pf *pf, bool add)
{
	struct i40e_filter_program_desc *fdir_desc;
	struct i40e_tx_buffer *tx_buf, *first;
	struct i40e_tx_desc *tx_desc;
	struct i40e_ring *tx_ring;
	unsigned int fpt, dcc;
	struct i40e_vsi *vsi;
	struct device *dev;
	dma_addr_t dma;
	u32 td_cmd = 0;
	u16 delay = 0;
	u16 i;

	/* find existing FDIR VSI */
	vsi = NULL;
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
			vsi = pf->vsi[i];
	if (!vsi)
		return -ENOENT;

	tx_ring = vsi->tx_rings[0];
	dev = tx_ring->dev;

	/* we need two descriptors to add/del a filter and we can wait */
	do {
		if (I40E_DESC_UNUSED(tx_ring) > 1)
			break;
		msleep_interruptible(1);
		delay++;
	} while (delay < I40E_FD_CLEAN_DELAY);

	if (!(I40E_DESC_UNUSED(tx_ring) > 1))
		return -EAGAIN;

	dma = dma_map_single(dev, raw_packet,
			     I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		goto dma_fail;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
	first = &tx_ring->tx_bi[i];
	memset(first, 0, sizeof(struct i40e_tx_buffer));

	tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;

	fpt = (fdir_data->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
	      I40E_TXD_FLTR_QW0_QINDEX_MASK;

	fpt |= (fdir_data->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
	       I40E_TXD_FLTR_QW0_FLEXOFF_MASK;

	fpt |= (fdir_data->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
	       I40E_TXD_FLTR_QW0_PCTYPE_MASK;

	/* Use LAN VSI Id if not programmed by user */
	if (fdir_data->dest_vsi == 0)
		fpt |= (pf->vsi[pf->lan_vsi]->id) <<
		       I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
	else
		fpt |= ((u32)fdir_data->dest_vsi <<
			I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
		       I40E_TXD_FLTR_QW0_DEST_VSI_MASK;

	dcc = I40E_TX_DESC_DTYPE_FILTER_PROG;

	if (add)
		dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
		       I40E_TXD_FLTR_QW1_PCMD_SHIFT;
	else
		dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
		       I40E_TXD_FLTR_QW1_PCMD_SHIFT;

	dcc |= (fdir_data->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT) &
	       I40E_TXD_FLTR_QW1_DEST_MASK;

	dcc |= (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
	       I40E_TXD_FLTR_QW1_FD_STATUS_MASK;

	if (fdir_data->cnt_index != 0) {
		dcc |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
		dcc |= ((u32)fdir_data->cnt_index <<
			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
		       I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
	}

	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(fpt);
	fdir_desc->rsvd = cpu_to_le32(0);
	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc);
	fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id);

	/* Now program a dummy descriptor */
	i = tx_ring->next_to_use;
	tx_desc = I40E_TX_DESC(tx_ring, i);
	tx_buf = &tx_ring->tx_bi[i];

	tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;

	memset(tx_buf, 0, sizeof(struct i40e_tx_buffer));

	/* record length, and DMA address */
	dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
	dma_unmap_addr_set(tx_buf, dma, dma);

	tx_desc->buffer_addr = cpu_to_le64(dma);
	td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;

	tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB;
	tx_buf->raw_buf = (void *)raw_packet;

	tx_desc->cmd_type_offset_bsz =
		build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.
	 */
	wmb();

	/* Mark the data descriptor to be watched */
	first->next_to_watch = tx_desc;

	writel(tx_ring->next_to_use, tx_ring->tail);
	return 0;

dma_fail:
	return -1;
}
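
/* Illustrative sketch (editor's example, not part of the upstream driver):
 * both descriptors above advance next_to_use with the same wrap idiom.
 * The hypothetical helper below isolates that arithmetic:
 */
static inline u16 i40e_example_next_index(u16 i, u16 count)
{
	/* advance one slot and wrap back to 0 at the end of the ring */
	return ((i + 1) < count) ? i + 1 : 0;
}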
#define IP_HEADER_OFFSET 14
#define I40E_UDPIP_DUMMY_PACKET_LEN 42
/**
 * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_filter *fd_data,
				   bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct udphdr *udp;
	struct iphdr *ip;
	bool err = false;
	u8 *raw_packet;
	int ret;
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};

	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
	      + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip[0];
	udp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip[0];
	udp->source = fd_data->src_port;

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		err = true;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev,
				 "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}

	if (err)
		kfree(raw_packet);
	return err ? -EOPNOTSUPP : 0;
}
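
/* Illustrative sketch (editor's example, not part of the upstream driver):
 * every i40e_add_del_fdir_*() helper works the same way - copy a canned
 * Ethernet+IP+L4 dummy frame, then patch the filter's addresses and ports
 * in at fixed offsets.  A hypothetical stand-alone version of the patching
 * step for UDPv4:
 */
static inline void i40e_example_patch_udpv4(u8 *raw_packet, __be32 saddr,
					    __be32 daddr, __be16 sport,
					    __be16 dport)
{
	struct iphdr *ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	struct udphdr *udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET +
					       sizeof(struct iphdr));

	ip->saddr = saddr;		/* all fields already big-endian */
	ip->daddr = daddr;
	udp->source = sport;
	udp->dest = dport;
}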
#define I40E_TCPIP_DUMMY_PACKET_LEN 54
/**
 * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_filter *fd_data,
				   bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct tcphdr *tcp;
	struct iphdr *ip;
	bool err = false;
	u8 *raw_packet;
	int ret;
	/* Dummy packet */
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
		0x0, 0x72, 0, 0, 0, 0};

	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
	      + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip[0];
	tcp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip[0];
	tcp->source = fd_data->src_port;

	if (add) {
		pf->fd_tcp_rule++;
		if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
			if (I40E_DEBUG_FD & pf->hw.debug_mask)
				dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
			pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
		}
	} else {
		pf->fd_tcp_rule = (pf->fd_tcp_rule > 0) ?
				  (pf->fd_tcp_rule - 1) : 0;
		if (pf->fd_tcp_rule == 0) {
			pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
			if (I40E_DEBUG_FD & pf->hw.debug_mask)
				dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
		}
	}

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		err = true;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}

	if (err)
		kfree(raw_packet);

	return err ? -EOPNOTSUPP : 0;
}
/**
 * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
				    struct i40e_fdir_filter *fd_data,
				    bool add)
{
	return -EOPNOTSUPP;
}
#define I40E_IP_DUMMY_PACKET_LEN 34
/**
 * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
				  struct i40e_fdir_filter *fd_data,
				  bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct iphdr *ip;
	bool err = false;
	u8 *raw_packet;
	int ret;
	int i;
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0};

	for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
	     i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
		raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
		if (!raw_packet)
			return -ENOMEM;
		memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
		ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);

		ip->saddr = fd_data->src_ip[0];
		ip->daddr = fd_data->dst_ip[0];
		ip->protocol = 0;

		fd_data->pctype = i;
		ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);

		if (ret) {
			dev_info(&pf->pdev->dev,
				 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
				 fd_data->pctype, fd_data->fd_id, ret);
			err = true;
		} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
			if (add)
				dev_info(&pf->pdev->dev,
					 "Filter OK for PCTYPE %d loc = %d\n",
					 fd_data->pctype, fd_data->fd_id);
			else
				dev_info(&pf->pdev->dev,
					 "Filter deleted for PCTYPE %d loc = %d\n",
					 fd_data->pctype, fd_data->fd_id);
		}
	}

	if (err)
		kfree(raw_packet);

	return err ? -EOPNOTSUPP : 0;
}
/**
 * i40e_add_del_fdir - Build raw packets to add/del fdir filter
 * @vsi: pointer to the targeted VSI
 * @input: the flow director filter to add or remove
 * @add: true adds a filter, false removes it
 *
 **/
int i40e_add_del_fdir(struct i40e_vsi *vsi,
		      struct i40e_fdir_filter *input, bool add)
{
	struct i40e_pf *pf = vsi->back;
	int ret;

	switch (input->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
		break;
	case UDP_V4_FLOW:
		ret = i40e_add_del_fdir_udpv4(vsi, input, add);
		break;
	case SCTP_V4_FLOW:
		ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
		break;
	case IPV4_FLOW:
		ret = i40e_add_del_fdir_ipv4(vsi, input, add);
		break;
	case IP_USER_FLOW:
		switch (input->ip4_proto) {
		case IPPROTO_TCP:
			ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
			break;
		case IPPROTO_UDP:
			ret = i40e_add_del_fdir_udpv4(vsi, input, add);
			break;
		case IPPROTO_SCTP:
			ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
			break;
		default:
			ret = i40e_add_del_fdir_ipv4(vsi, input, add);
			break;
		}
		break;
	default:
		dev_info(&pf->pdev->dev, "Could not specify spec type %d\n",
			 input->flow_type);
		ret = -EINVAL;
	}

	/* The buffer allocated here is freed by the i40e_clean_tx_ring() */
	return ret;
}
/**
 * i40e_fd_handle_status - check the Programming Status for FD
 * @rx_ring: the Rx ring for this descriptor
 * @rx_desc: the Rx descriptor for the programming status, not a packet descriptor
 * @prog_id: the id originally used for programming
 *
 * This is used to verify if the FD programming or invalidation
 * requested by SW to the HW is successful or not and take actions accordingly.
 **/
static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
				  union i40e_rx_desc *rx_desc, u8 prog_id)
{
	struct i40e_pf *pf = rx_ring->vsi->back;
	struct pci_dev *pdev = pf->pdev;
	u32 fcnt_prog, fcnt_avail;
	u32 error;
	u64 qw;

	qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
		I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;

	if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
		pf->fd_inv = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fd_id);
		if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
		    (I40E_DEBUG_FD & pf->hw.debug_mask))
			dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
				 pf->fd_inv);

		/* Check if the programming error is for ATR.
		 * If so, auto disable ATR and set a state for
		 * flush in progress. Next time we come here if flush is in
		 * progress do nothing, once flush is complete the state will
		 * be cleared.
		 */
		if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
			return;

		pf->fd_add_err++;
		/* store the current atr filter count */
		pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);

		if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
		    (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
			pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
			set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
		}

		/* filter programming failed most likely due to table full */
		fcnt_prog = i40e_get_global_fd_count(pf);
		fcnt_avail = pf->fdir_pf_filter_count;
		/* If ATR is running fcnt_prog can quickly change,
		 * if we are very close to full, it makes sense to disable
		 * FD ATR/SB and then re-enable it when there is room.
		 */
		if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
			if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
			    !(pf->auto_disable_flags &
			      I40E_FLAG_FD_SB_ENABLED)) {
				if (I40E_DEBUG_FD & pf->hw.debug_mask)
					dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
				pf->auto_disable_flags |=
					I40E_FLAG_FD_SB_ENABLED;
			}
		}
	} else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
				 rx_desc->wb.qword0.hi_dword.fd_id);
	}
}
/**
 * i40e_unmap_and_free_tx_resource - Release a Tx buffer
 * @ring:      the ring that owns the buffer
 * @tx_buffer: the buffer to free
 **/
static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
					    struct i40e_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}

	if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
		kfree(tx_buffer->raw_buf);

	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}
/**
 * i40e_clean_tx_ring - Free all Tx ring buffers
 * @tx_ring: ring to be cleaned
 **/
void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
{
	unsigned long bi_size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_bi)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);

	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
						  tx_ring->queue_index));
}
/**
 * i40e_free_tx_resources - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void i40e_free_tx_resources(struct i40e_ring *tx_ring)
{
	i40e_clean_tx_ring(tx_ring);
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;

	if (tx_ring->desc) {
		dma_free_coherent(tx_ring->dev, tx_ring->size,
				  tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}
/**
 * i40e_get_tx_pending - how many tx descriptors not processed
 * @ring: the ring of descriptors
 *
 * Since there is no access to the ring head register
 * in XL710, we need to use our local copies
 **/
u32 i40e_get_tx_pending(struct i40e_ring *ring)
{
	u32 head, tail;

	head = i40e_get_head(ring);
	tail = readl(ring->tail);

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);

	return 0;
}
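
/* Illustrative sketch (editor's example, not part of the upstream driver):
 * the wraparound arithmetic above, on its own.  With count = 512,
 * head = 500 and tail = 4, the ring has wrapped and 4 + 512 - 500 = 16
 * descriptors are still pending.
 */
static inline u32 i40e_example_ring_distance(u32 head, u32 tail, u32 count)
{
	return (head < tail) ? tail - head : (tail + count - head);
}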
#define WB_STRIDE 0x3
/**
 * i40e_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring:  tx ring to clean
 * @budget:   how many cleans we're allowed
 *
 * Returns true if there's any budget left (i.e. the clean is finished)
 **/
static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
{
	u16 i = tx_ring->next_to_clean;
	struct i40e_tx_buffer *tx_buf;
	struct i40e_tx_desc *tx_head;
	struct i40e_tx_desc *tx_desc;
	unsigned int total_packets = 0;
	unsigned int total_bytes = 0;

	tx_buf = &tx_ring->tx_bi[i];
	tx_desc = I40E_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));

	do {
		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* we have caught up to head, no work left to do */
		if (tx_head == tx_desc)
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_packets += tx_buf->gso_segs;

		/* free the skb */
		dev_consume_skb_any(tx_buf->skb);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {

			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_bi;
				tx_desc = I40E_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = I40E_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	tx_ring->q_vector->tx.total_bytes += total_bytes;
	tx_ring->q_vector->tx.total_packets += total_packets;

	if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
		unsigned int j = 0;

		/* check to see if there are < 4 descriptors
		 * waiting to be written back, then kick the hardware to force
		 * them to be written back in case we stay in NAPI.
		 * In this mode on X722 we do not enable Interrupt.
		 */
		j = i40e_get_tx_pending(tx_ring);

		if (budget &&
		    ((j / (WB_STRIDE + 1)) == 0) && (j != 0) &&
		    !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
		    (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
			tx_ring->arm_wb = true;
	}

	netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
						      tx_ring->queue_index),
				  total_packets, total_bytes);

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}
/**
 * i40e_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to enable writeback
 *
 **/
static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
				  struct i40e_q_vector *q_vector)
{
	u16 flags = q_vector->tx.ring[0].flags;
	u32 val;

	if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR))
		return;

	if (q_vector->arm_wb_state)
		return;

	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
		val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK |
		      I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */

		wr32(&vsi->back->hw,
		     I40E_PFINT_DYN_CTLN(q_vector->v_idx + vsi->base_vector - 1),
		     val);
	} else {
		val = I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK |
		      I40E_PFINT_DYN_CTL0_ITR_INDX_MASK; /* set noitr */

		wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
	}
	q_vector->arm_wb_state = true;
}
/**
 * i40e_force_wb - Issue SW Interrupt so HW does a wb
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to force writeback
 *
 **/
void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
{
	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
		u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
			  I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
			  I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
			  I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
			  /* allow 00 to be written to the index */

		wr32(&vsi->back->hw,
		     I40E_PFINT_DYN_CTLN(q_vector->v_idx +
					 vsi->base_vector - 1), val);
	} else {
		u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
			  I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
			  I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
			  I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK;
			  /* allow 00 to be written to the index */

		wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
	}
}
/**
 * i40e_set_new_dynamic_itr - Find new ITR level
 * @rc: structure containing ring performance data
 *
 * Returns true if ITR changed, false if not
 *
 * Stores a new ITR value based on packets and byte counts during
 * the last interrupt.  The advantage of per interrupt computation
 * is faster updates and more accurate ITR for the current traffic
 * pattern.  Constants in this function were computed based on
 * theoretical maximum wire speed and thresholds were set based on
 * testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
{
	enum i40e_latency_range new_latency_range = rc->latency_range;
	struct i40e_q_vector *qv = rc->ring->q_vector;
	u32 new_itr = rc->itr;
	int bytes_per_int;
	int usecs;

	if (rc->total_packets == 0 || !rc->itr)
		return false;

	/* simple throttlerate management
	 *   0-10MB/s   lowest (50000 ints/s)
	 *  10-20MB/s   low    (20000 ints/s)
	 *  20-1249MB/s bulk   (18000 ints/s)
	 *  > 40000 Rx packets per second (8000 ints/s)
	 *
	 * The math works out because the divisor is in 10^(-6) which
	 * turns the bytes/us input value into MB/s values, but
	 * make sure to use usecs, as the register values written
	 * are in 2 usec increments in the ITR registers, and make sure
	 * to use the smoothed values that the countdown timer gives us.
	 */
	usecs = (rc->itr << 1) * ITR_COUNTDOWN_START;
	bytes_per_int = rc->total_bytes / usecs;

	switch (new_latency_range) {
	case I40E_LOWEST_LATENCY:
		if (bytes_per_int > 10)
			new_latency_range = I40E_LOW_LATENCY;
		break;
	case I40E_LOW_LATENCY:
		if (bytes_per_int > 20)
			new_latency_range = I40E_BULK_LATENCY;
		else if (bytes_per_int <= 10)
			new_latency_range = I40E_LOWEST_LATENCY;
		break;
	case I40E_BULK_LATENCY:
	case I40E_ULTRA_LATENCY:
	default:
		if (bytes_per_int <= 20)
			new_latency_range = I40E_LOW_LATENCY;
		break;
	}

	/* this is to adjust RX more aggressively when streaming small
	 * packets.  The value of 40000 was picked as it is just beyond
	 * what the hardware can receive per second if in low latency
	 * mode.
	 */
#define RX_ULTRA_PACKET_RATE 40000

	if ((((rc->total_packets * 1000000) / usecs) > RX_ULTRA_PACKET_RATE) &&
	    (&qv->rx == rc))
		new_latency_range = I40E_ULTRA_LATENCY;

	rc->latency_range = new_latency_range;

	switch (new_latency_range) {
	case I40E_LOWEST_LATENCY:
		new_itr = I40E_ITR_50K;
		break;
	case I40E_LOW_LATENCY:
		new_itr = I40E_ITR_20K;
		break;
	case I40E_BULK_LATENCY:
		new_itr = I40E_ITR_18K;
		break;
	case I40E_ULTRA_LATENCY:
		new_itr = I40E_ITR_8K;
		break;
	default:
		break;
	}

	rc->total_bytes = 0;
	rc->total_packets = 0;

	if (new_itr != rc->itr) {
		rc->itr = new_itr;
		return true;
	}

	return false;
}
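
/* Illustrative sketch (editor's example, not part of the upstream driver):
 * the steady-state thresholds above reduce to a simple MB/s ladder.  A
 * hypothetical classifier over bytes-per-interrupt-window (total bytes
 * divided by the usecs the window spanned), ignoring the ultra-rate check:
 */
static inline enum i40e_latency_range
i40e_example_latency_class(u32 total_bytes, u32 usecs)
{
	u32 bytes_per_int = total_bytes / usecs;	/* == MB/s */

	if (bytes_per_int <= 10)
		return I40E_LOWEST_LATENCY;	/* 50K ints/s */
	if (bytes_per_int <= 20)
		return I40E_LOW_LATENCY;	/* 20K ints/s */
	return I40E_BULK_LATENCY;		/* 18K ints/s */
}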
/**
 * i40e_clean_programming_status - clean the programming status descriptor
 * @rx_ring: the rx ring that has this descriptor
 * @rx_desc: the rx descriptor written back by HW
 *
 * Flow director should handle FD_FILTER_STATUS to check its filter programming
 * status being successful or not and take actions accordingly. FCoE should
 * handle its context/filter programming/invalidation status and take actions.
 *
 **/
static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
					  union i40e_rx_desc *rx_desc)
{
	u64 qw;
	u8 id;

	qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
	     I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;

	if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
		i40e_fd_handle_status(rx_ring, rx_desc, id);
#ifdef I40E_FCOE
	else if ((id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS) ||
		 (id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS))
		i40e_fcoe_handle_status(rx_ring, rx_desc, id);
#endif
}
/**
 * i40e_setup_tx_descriptors - Allocate the Tx descriptors
 * @tx_ring: the tx ring to set up
 *
 * Return 0 on success, negative on error
 **/
int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int bi_size;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(tx_ring->tx_bi);
	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!tx_ring->tx_bi)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
	/* add u32 for head writeback, align after this takes care of
	 * guaranteeing this is at least one cache line in size
	 */
	tx_ring->size += sizeof(u32);
	tx_ring->size = ALIGN(tx_ring->size, 4096);
	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			 tx_ring->size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	return 0;

err:
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;
	return -ENOMEM;
}
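
/* Illustrative sketch (editor's example, not part of the upstream driver):
 * the sizing math above in one place - descriptors plus the u32 head
 * write-back word, rounded up to a 4K multiple for the DMA allocation.
 */
static inline unsigned int i40e_example_tx_ring_bytes(u16 count)
{
	return ALIGN(count * sizeof(struct i40e_tx_desc) + sizeof(u32), 4096);
}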
/**
 * i40e_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 **/
void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	struct i40e_rx_buffer *rx_bi;
	unsigned long bi_size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_bi)
		return;

	if (ring_is_ps_enabled(rx_ring)) {
		int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count;

		rx_bi = &rx_ring->rx_bi[0];
		if (rx_bi->hdr_buf) {
			dma_free_coherent(dev,
					  bufsz,
					  rx_bi->hdr_buf,
					  rx_bi->dma);
			for (i = 0; i < rx_ring->count; i++) {
				rx_bi = &rx_ring->rx_bi[i];
				rx_bi->dma = 0;
				rx_bi->hdr_buf = NULL;
			}
		}
	}
	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		rx_bi = &rx_ring->rx_bi[i];
		if (rx_bi->dma) {
			dma_unmap_single(dev,
					 rx_bi->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_bi->dma = 0;
		}
		if (rx_bi->skb) {
			dev_kfree_skb(rx_bi->skb);
			rx_bi->skb = NULL;
		}
		if (rx_bi->page) {
			if (rx_bi->page_dma) {
				dma_unmap_page(dev,
					       rx_bi->page_dma,
					       PAGE_SIZE,
					       DMA_FROM_DEVICE);
				rx_bi->page_dma = 0;
			}
			__free_page(rx_bi->page);
			rx_bi->page = NULL;
			rx_bi->page_offset = 0;
		}
	}

	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}
/**
 * i40e_free_rx_resources - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void i40e_free_rx_resources(struct i40e_ring *rx_ring)
{
	i40e_clean_rx_ring(rx_ring);
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;

	if (rx_ring->desc) {
		dma_free_coherent(rx_ring->dev, rx_ring->size,
				  rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}
/**
 * i40e_alloc_rx_headers - allocate rx header buffers
 * @rx_ring: ring to alloc buffers
 *
 * Allocate rx header buffers for the entire ring. As these are static,
 * this is only called when setting up a new ring.
 **/
void i40e_alloc_rx_headers(struct i40e_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	struct i40e_rx_buffer *rx_bi;
	dma_addr_t dma;
	void *buffer;
	int buf_size;
	int i;

	if (rx_ring->rx_bi[0].hdr_buf)
		return;
	/* Make sure the buffers don't cross cache line boundaries. */
	buf_size = ALIGN(rx_ring->rx_hdr_len, 256);
	buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count,
				    &dma, GFP_KERNEL);
	if (!buffer)
		return;
	for (i = 0; i < rx_ring->count; i++) {
		rx_bi = &rx_ring->rx_bi[i];
		rx_bi->dma = dma + (i * buf_size);
		rx_bi->hdr_buf = buffer + (i * buf_size);
	}
}
/**
 * i40e_setup_rx_descriptors - Allocate Rx descriptors
 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int bi_size;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(rx_ring->rx_bi);
	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
	rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!rx_ring->rx_bi)
		goto err;

	u64_stats_init(&rx_ring->syncp);

	/* Round up to nearest 4K */
	rx_ring->size = ring_is_16byte_desc_enabled(rx_ring)
		? rx_ring->count * sizeof(union i40e_16byte_rx_desc)
		: rx_ring->count * sizeof(union i40e_32byte_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);
	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			 rx_ring->size);
		goto err;
	}

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;
err:
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;
	return -ENOMEM;
}
/**
 * i40e_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 **/
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;
	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}
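
/* Illustrative sketch (editor's example, not part of the upstream driver):
 * the generic publish pattern used above - make every descriptor store
 * globally visible before the doorbell write that hands ownership to the
 * device.
 */
static inline void i40e_example_publish(u32 next, void __iomem *doorbell)
{
	wmb();			/* order descriptor writes before the bump */
	writel(next, doorbell);	/* tell hardware about the new descriptors */
}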
/**
 * i40e_alloc_rx_buffers_ps - Replace used receive buffers; packet split
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns true if any errors on allocation
 **/
bool i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
{
	u16 i = rx_ring->next_to_use;
	union i40e_rx_desc *rx_desc;
	struct i40e_rx_buffer *bi;
	const int current_node = numa_node_id();

	/* do nothing if no valid netdev defined */
	if (!rx_ring->netdev || !cleaned_count)
		return false;

	while (cleaned_count--) {
		rx_desc = I40E_RX_DESC(rx_ring, i);
		bi = &rx_ring->rx_bi[i];

		if (bi->skb) /* desc is in use */
			goto no_buffers;

		/* If we've been moved to a different NUMA node, release the
		 * page so we can get a new one on the current node.
		 */
		if (bi->page && page_to_nid(bi->page) != current_node) {
			dma_unmap_page(rx_ring->dev,
				       bi->page_dma,
				       PAGE_SIZE,
				       DMA_FROM_DEVICE);
			__free_page(bi->page);
			bi->page = NULL;
			bi->page_dma = 0;
			rx_ring->rx_stats.realloc_count++;
		} else if (bi->page) {
			rx_ring->rx_stats.page_reuse_count++;
		}

		if (!bi->page) {
			bi->page = alloc_page(GFP_ATOMIC);
			if (!bi->page) {
				rx_ring->rx_stats.alloc_page_failed++;
				goto no_buffers;
			}
			bi->page_dma = dma_map_page(rx_ring->dev,
						    bi->page,
						    0,
						    PAGE_SIZE,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(rx_ring->dev, bi->page_dma)) {
				rx_ring->rx_stats.alloc_page_failed++;
				__free_page(bi->page);
				bi->page = NULL;
				bi->page_dma = 0;
				bi->page_offset = 0;
				goto no_buffers;
			}
			bi->page_offset = 0;
		}

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr =
			cpu_to_le64(bi->page_dma + bi->page_offset);
		rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
		i++;
		if (i == rx_ring->count)
			i = 0;
	}

	if (rx_ring->next_to_use != i)
		i40e_release_rx_desc(rx_ring, i);

	return false;

no_buffers:
	if (rx_ring->next_to_use != i)
		i40e_release_rx_desc(rx_ring, i);

	/* make sure to come back via polling to try again after
	 * allocation failure
	 */
	return true;
}
/**
 * i40e_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns true if any errors on allocation
 **/
bool i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
{
	u16 i = rx_ring->next_to_use;
	union i40e_rx_desc *rx_desc;
	struct i40e_rx_buffer *bi;
	struct sk_buff *skb;

	/* do nothing if no valid netdev defined */
	if (!rx_ring->netdev || !cleaned_count)
		return false;

	while (cleaned_count--) {
		rx_desc = I40E_RX_DESC(rx_ring, i);
		bi = &rx_ring->rx_bi[i];
		skb = bi->skb;

		if (!skb) {
			skb = __netdev_alloc_skb_ip_align(rx_ring->netdev,
							  rx_ring->rx_buf_len,
							  GFP_ATOMIC |
							  __GFP_NOWARN);
			if (!skb) {
				rx_ring->rx_stats.alloc_buff_failed++;
				goto no_buffers;
			}
			/* initialize queue mapping */
			skb_record_rx_queue(skb, rx_ring->queue_index);
			bi->skb = skb;
		}

		if (!bi->dma) {
			bi->dma = dma_map_single(rx_ring->dev,
						 skb->data,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);
			if (dma_mapping_error(rx_ring->dev, bi->dma)) {
				rx_ring->rx_stats.alloc_buff_failed++;
				bi->dma = 0;
				dev_kfree_skb(bi->skb);
				bi->skb = NULL;
				goto no_buffers;
			}
		}

		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
		rx_desc->read.hdr_addr = 0;
		i++;
		if (i == rx_ring->count)
			i = 0;
	}

	if (rx_ring->next_to_use != i)
		i40e_release_rx_desc(rx_ring, i);

	return false;

no_buffers:
	if (rx_ring->next_to_use != i)
		i40e_release_rx_desc(rx_ring, i);

	/* make sure to come back via polling to try again after
	 * allocation failure
	 */
	return true;
}
/**
 * i40e_receive_skb - Send a completed packet up the stack
 * @rx_ring:  rx ring in play
 * @skb: packet to send up
 * @vlan_tag: vlan tag for packet
 **/
static void i40e_receive_skb(struct i40e_ring *rx_ring,
			     struct sk_buff *skb, u16 vlan_tag)
{
	struct i40e_q_vector *q_vector = rx_ring->q_vector;

	if (vlan_tag & VLAN_VID_MASK)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);

	napi_gro_receive(&q_vector->napi, skb);
}
/**
 * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
 * @vsi: the VSI we care about
 * @skb: skb currently being received and modified
 * @rx_status: status value of last descriptor in packet
 * @rx_error: error value of last descriptor in packet
 * @rx_ptype: ptype value of last descriptor in packet
 **/
static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
				    struct sk_buff *skb,
				    u32 rx_status,
				    u32 rx_error,
				    u16 rx_ptype)
{
	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype);
	bool ipv4 = false, ipv6 = false;
	bool ipv4_tunnel, ipv6_tunnel;
	__wsum rx_udp_csum;
	struct iphdr *iph;
	__sum16 csum;

	ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
		      (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
	ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
		      (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);

	skb->ip_summed = CHECKSUM_NONE;

	/* Rx csum enabled and ip headers found? */
	if (!(vsi->netdev->features & NETIF_F_RXCSUM))
		return;

	/* did the hardware decode the packet and checksum? */
	if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
		return;

	/* both known and outer_ip must be set for the below code to work */
	if (!(decoded.known && decoded.outer_ip))
		return;

	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
	    decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4)
		ipv4 = true;
	else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
		 decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6)
		ipv6 = true;

	if (ipv4 &&
	    (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
			 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
		goto checksum_fail;

	/* likely incorrect csum if alternate IP extension headers found */
	if (ipv6 &&
	    rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
		/* don't increment checksum err here, non-fatal err */
		return;

	/* there was some L4 error, count error and punt packet to the stack */
	if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
		goto checksum_fail;

	/* handle packets that were not able to be checksummed due
	 * to arrival speed, in this case the stack can compute
	 * the csum.
	 */
	if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
		return;

	/* If VXLAN/GENEVE traffic has an outer UDPv4 checksum we need to check
	 * it in the driver, hardware does not do it for us.
	 * Since L3L4P bit was set we assume a valid IHL value (>=5)
	 * so the total length of IPv4 header is IHL*4 bytes
	 * The UDP_0 bit *may* be set if the *inner* header is UDP
	 */
	if (!(vsi->back->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE) &&
	    (ipv4_tunnel)) {
		skb->transport_header = skb->mac_header +
					sizeof(struct ethhdr) +
					(ip_hdr(skb)->ihl * 4);

		/* Add 4 bytes for VLAN tagged packets */
		skb->transport_header += (skb->protocol == htons(ETH_P_8021Q) ||
					  skb->protocol == htons(ETH_P_8021AD))
					  ? VLAN_HLEN : 0;

		if ((ip_hdr(skb)->protocol == IPPROTO_UDP) &&
		    (udp_hdr(skb)->check != 0)) {
			rx_udp_csum = udp_csum(skb);
			iph = ip_hdr(skb);
			csum = csum_tcpudp_magic(
					iph->saddr, iph->daddr,
					(skb->len - skb_transport_offset(skb)),
					IPPROTO_UDP, rx_udp_csum);

			if (udp_hdr(skb)->check != csum)
				goto checksum_fail;

		} /* else its GRE and so no outer UDP header */
	}

	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->csum_level = ipv4_tunnel || ipv6_tunnel;

	return;

checksum_fail:
	vsi->back->hw_csum_rx_error++;
}
/**
 * i40e_ptype_to_htype - get a hash type
 * @ptype: the ptype value from the descriptor
 *
 * Returns a hash type to be used by skb_set_hash
 **/
static inline enum pkt_hash_types i40e_ptype_to_htype(u8 ptype)
{
	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);

	if (!decoded.known)
		return PKT_HASH_TYPE_NONE;

	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
	    decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
		return PKT_HASH_TYPE_L4;
	else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
		 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
		return PKT_HASH_TYPE_L3;
	else
		return PKT_HASH_TYPE_L2;
}
/**
 * i40e_rx_hash - set the hash value in the skb
 * @ring: descriptor ring
 * @rx_desc: specific descriptor
 * @skb: skb currently being received and modified
 * @rx_ptype: Rx packet type
 **/
static inline void i40e_rx_hash(struct i40e_ring *ring,
				union i40e_rx_desc *rx_desc,
				struct sk_buff *skb,
				u8 rx_ptype)
{
	u32 hash;
	const __le64 rss_mask =
		cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
			    I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);

	if (!(ring->netdev->features & NETIF_F_RXHASH))
		return;

	if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
		hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
		skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
	}
}
/**
 * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split
 * @rx_ring:  rx ring to clean
 * @budget:   how many cleans we're allowed
 *
 * Returns number of packets cleaned
 **/
static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, const int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
	struct i40e_vsi *vsi = rx_ring->vsi;
	u16 i = rx_ring->next_to_clean;
	union i40e_rx_desc *rx_desc;
	u32 rx_error, rx_status;
	bool failure = false;
	u8 rx_ptype;
	u64 qword;
	u32 copysize;

	if (budget <= 0)
		return 0;

	do {
		struct i40e_rx_buffer *rx_bi;
		struct sk_buff *skb;
		u16 vlan_tag;
		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
			failure = failure ||
				  i40e_alloc_rx_buffers_ps(rx_ring,
							   cleaned_count);
			cleaned_count = 0;
		}

		i = rx_ring->next_to_clean;
		rx_desc = I40E_RX_DESC(rx_ring, i);
		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
		rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
			I40E_RXD_QW1_STATUS_SHIFT;

		if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * DD bit is set.
		 */
		dma_rmb();
		/* sync header buffer for reading */
		dma_sync_single_range_for_cpu(rx_ring->dev,
					      rx_ring->rx_bi[0].dma,
					      i * rx_ring->rx_hdr_len,
					      rx_ring->rx_hdr_len,
					      DMA_FROM_DEVICE);
		if (i40e_rx_is_programming_status(qword)) {
			i40e_clean_programming_status(rx_ring, rx_desc);
			I40E_RX_INCREMENT(rx_ring, i);
			continue;
		}
		rx_bi = &rx_ring->rx_bi[i];
		skb = rx_bi->skb;
		if (likely(!skb)) {
			skb = __netdev_alloc_skb_ip_align(rx_ring->netdev,
							  rx_ring->rx_hdr_len,
							  GFP_ATOMIC |
							  __GFP_NOWARN);
			if (!skb) {
				rx_ring->rx_stats.alloc_buff_failed++;
				failure = true;
				break;
			}

			/* initialize queue mapping */
			skb_record_rx_queue(skb, rx_ring->queue_index);
			/* we are reusing so sync this buffer for CPU use */
			dma_sync_single_range_for_cpu(rx_ring->dev,
						      rx_ring->rx_bi[0].dma,
						      i * rx_ring->rx_hdr_len,
						      rx_ring->rx_hdr_len,
						      DMA_FROM_DEVICE);
		}
		rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
				I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
		rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >>
				I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
		rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >>
			 I40E_RXD_QW1_LENGTH_SPH_SHIFT;

		rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
			   I40E_RXD_QW1_ERROR_SHIFT;
		rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
		rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);

		rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
			   I40E_RXD_QW1_PTYPE_SHIFT;
		/* sync half-page for reading */
		dma_sync_single_range_for_cpu(rx_ring->dev,
					      rx_bi->page_dma,
					      rx_bi->page_offset,
					      PAGE_SIZE / 2,
					      DMA_FROM_DEVICE);
		prefetch(page_address(rx_bi->page) + rx_bi->page_offset);
		rx_bi->skb = NULL;
		cleaned_count++;
		copysize = 0;
		if (rx_hbo || rx_sph) {
			int len;

			if (rx_hbo)
				len = I40E_RX_HDR_SIZE;
			else
				len = rx_header_len;
			memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len);
		} else if (skb->len == 0) {
			int len;
			unsigned char *va = page_address(rx_bi->page) +
					    rx_bi->page_offset;

			len = min(rx_packet_len, rx_ring->rx_hdr_len);
			memcpy(__skb_put(skb, len), va, len);
			copysize = len;
			rx_packet_len -= len;
		}
		/* Get the rest of the data if this was a header split */
		if (rx_packet_len) {
			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
					rx_bi->page,
					rx_bi->page_offset + copysize,
					rx_packet_len, I40E_RXBUFFER_2048);

			get_page(rx_bi->page);
			/* switch to the other half-page here; the allocation
			 * code programs the right addr into HW. If we haven't
			 * used this half-page, the address won't be changed,
			 * and HW can just use it next time through.
			 */
			rx_bi->page_offset ^= PAGE_SIZE / 2;
			/* If the page count is more than 2, then both halves
			 * of the page are used and we need to free it. Do it
			 * here instead of in the alloc code. Otherwise one
			 * of the half-pages might be released between now and
			 * then, and we wouldn't know which one to use.
			 */
			if (page_count(rx_bi->page) > 2) {
				dma_unmap_page(rx_ring->dev,
					       rx_bi->page_dma,
					       PAGE_SIZE,
					       DMA_FROM_DEVICE);
				__free_page(rx_bi->page);
				rx_bi->page = NULL;
				rx_bi->page_dma = 0;
				rx_ring->rx_stats.realloc_count++;
			}

		}
		I40E_RX_INCREMENT(rx_ring, i);

		if (unlikely(
		    !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
			struct i40e_rx_buffer *next_buffer;

			next_buffer = &rx_ring->rx_bi[i];
			next_buffer->skb = skb;
			rx_ring->rx_stats.non_eop_descs++;
			continue;
		}

		/* ERR_MASK will only have valid bits if EOP set */
		if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
			dev_kfree_skb_any(skb);
			continue;
		}

		i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);

		if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
			i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
					   I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
					   I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
			rx_ring->last_rx_timestamp = jiffies;
		}

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		skb->protocol = eth_type_trans(skb, rx_ring->netdev);

		i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);

		vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
			 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
			 : 0;
#ifdef I40E_FCOE
		if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
			dev_kfree_skb_any(skb);
			continue;
		}
#endif
		i40e_receive_skb(rx_ring, skb, vlan_tag);

		rx_desc->wb.qword1.status_error_len = 0;

	} while (likely(total_rx_packets < budget));

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	rx_ring->q_vector->rx.total_packets += total_rx_packets;
	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;

	return failure ? budget : total_rx_packets;
}
/**
 * i40e_clean_rx_irq_1buf - Reclaim resources after receive; single buffer
 * @rx_ring:  rx ring to clean
 * @budget:   how many cleans we're allowed
 *
 * Returns number of packets cleaned
 **/
static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
	struct i40e_vsi *vsi = rx_ring->vsi;
	union i40e_rx_desc *rx_desc;
	u32 rx_error, rx_status;
	u16 rx_packet_len;
	bool failure = false;
	u8 rx_ptype;
	u64 qword;
	u16 i;

	do {
		struct i40e_rx_buffer *rx_bi;
		struct sk_buff *skb;
		u16 vlan_tag;
		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
			failure = failure ||
				  i40e_alloc_rx_buffers_1buf(rx_ring,
							     cleaned_count);
			cleaned_count = 0;
		}

		i = rx_ring->next_to_clean;
		rx_desc = I40E_RX_DESC(rx_ring, i);
		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
		rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
			    I40E_RXD_QW1_STATUS_SHIFT;

		if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * DD bit is set.
		 */
		dma_rmb();

		if (i40e_rx_is_programming_status(qword)) {
			i40e_clean_programming_status(rx_ring, rx_desc);
			I40E_RX_INCREMENT(rx_ring, i);
			continue;
		}
		rx_bi = &rx_ring->rx_bi[i];
		skb = rx_bi->skb;
		prefetch(skb->data);

		rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
				I40E_RXD_QW1_LENGTH_PBUF_SHIFT;

		rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
			   I40E_RXD_QW1_ERROR_SHIFT;
		rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);

		rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
			   I40E_RXD_QW1_PTYPE_SHIFT;
		rx_bi->skb = NULL;
		cleaned_count++;

		/* Get the header and possibly the whole packet
		 * If this is an skb from previous receive dma will be 0
		 */
		skb_put(skb, rx_packet_len);
		dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len,
				 DMA_FROM_DEVICE);
		rx_bi->dma = 0;

		I40E_RX_INCREMENT(rx_ring, i);

		if (unlikely(
		    !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
			rx_ring->rx_stats.non_eop_descs++;
			continue;
		}

		/* ERR_MASK will only have valid bits if EOP set */
		if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
			dev_kfree_skb_any(skb);
			continue;
		}

		i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
		if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
			i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
					   I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
					   I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
			rx_ring->last_rx_timestamp = jiffies;
		}

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		skb->protocol = eth_type_trans(skb, rx_ring->netdev);

		i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);

		vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
			 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
			 : 0;
#ifdef I40E_FCOE
		if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
			dev_kfree_skb_any(skb);
			continue;
		}
#endif
		i40e_receive_skb(rx_ring, skb, vlan_tag);

		rx_desc->wb.qword1.status_error_len = 0;
	} while (likely(total_rx_packets < budget));

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	rx_ring->q_vector->rx.total_packets += total_rx_packets;
	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;

	return failure ? budget : total_rx_packets;
}
static u32 i40e_buildreg_itr(const int type, const u16 itr)
{
	u32 val;

	val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
	      /* Don't clear PBA because that can cause lost interrupts that
	       * came in while we were cleaning/polling
	       */
	      (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
	      (itr << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);

	return val;
}
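
/* Illustrative sketch (editor's example, not part of the upstream driver):
 * a typical use of i40e_buildreg_itr() - re-enable the interrupt while
 * programming a 20K ints/s rate into the Rx ITR index:
 */
static inline u32 i40e_example_rx_itr_val(void)
{
	return i40e_buildreg_itr(I40E_RX_ITR, I40E_ITR_20K);
}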
/* a small macro to shorten up some long lines */
#define INTREG I40E_PFINT_DYN_CTLN
/**
 * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
 * @vsi: the VSI we care about
 * @q_vector: q_vector for which itr is being updated and interrupt enabled
 *
 **/
static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
					  struct i40e_q_vector *q_vector)
{
	struct i40e_hw *hw = &vsi->back->hw;
	bool rx = false, tx = false;
	u32 rxval, txval;
	int vector;

	vector = (q_vector->v_idx + vsi->base_vector);

	/* avoid dynamic calculation if in countdown mode OR if
	 * all dynamic is disabled
	 */
	rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);

	if (q_vector->itr_countdown > 0 ||
	    (!ITR_IS_DYNAMIC(vsi->rx_itr_setting) &&
	     !ITR_IS_DYNAMIC(vsi->tx_itr_setting))) {
		goto enable_int;
	}

	if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) {
		rx = i40e_set_new_dynamic_itr(&q_vector->rx);
		rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
	}

	if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) {
		tx = i40e_set_new_dynamic_itr(&q_vector->tx);
		txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr);
	}

	if (rx || tx) {
		/* get the higher of the two ITR adjustments and
		 * use the same value for both ITR registers
		 * when in adaptive mode (Rx and/or Tx)
		 */
		u16 itr = max(q_vector->tx.itr, q_vector->rx.itr);

		q_vector->tx.itr = q_vector->rx.itr = itr;
		txval = i40e_buildreg_itr(I40E_TX_ITR, itr);
		tx = true;
		rxval = i40e_buildreg_itr(I40E_RX_ITR, itr);
		rx = true;
	}

	/* only need to enable the interrupt once, but need
	 * to possibly update both ITR values
	 */
	if (rx) {
		/* set the INTENA_MSK_MASK so that this first write
		 * won't actually enable the interrupt, instead just
		 * updating the ITR (it's bit 31 PF and VF)
		 */
		rxval |= BIT(31);
		/* don't check _DOWN because interrupt isn't being enabled */
		wr32(hw, INTREG(vector - 1), rxval);
	}

enable_int:
	if (!test_bit(__I40E_DOWN, &vsi->state))
		wr32(hw, INTREG(vector - 1), txval);

	if (q_vector->itr_countdown)
		q_vector->itr_countdown--;
	else
		q_vector->itr_countdown = ITR_COUNTDOWN_START;
}
/**
 * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean all queues associated with a q_vector.
 *
 * Returns the amount of work done
 **/
int i40e_napi_poll(struct napi_struct *napi, int budget)
{
	struct i40e_q_vector *q_vector =
			       container_of(napi, struct i40e_q_vector, napi);
	struct i40e_vsi *vsi = q_vector->vsi;
	struct i40e_ring *ring;
	bool clean_complete = true;
	bool arm_wb = false;
	int budget_per_ring;
	int work_done = 0;

	if (test_bit(__I40E_DOWN, &vsi->state)) {
		napi_complete(napi);
		return 0;
	}

	/* Clear hung_detected bit */
	clear_bit(I40E_Q_VECTOR_HUNG_DETECT, &q_vector->hung_detected);
	/* Since the actual Tx work is minimal, we can give the Tx a larger
	 * budget and be more aggressive about cleaning up the Tx descriptors.
	 */
	i40e_for_each_ring(ring, q_vector->tx) {
		clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
		arm_wb = arm_wb || ring->arm_wb;
		ring->arm_wb = false;
	}

	/* Handle case where we are called by netpoll with a budget of 0 */
	if (budget <= 0)
		goto tx_only;

	/* We attempt to distribute budget to each Rx queue fairly, but don't
	 * allow the budget to go below 1 because that would exit polling early.
	 */
	budget_per_ring = max(budget/q_vector->num_ringpairs, 1);

	i40e_for_each_ring(ring, q_vector->rx) {
		int cleaned;

		if (ring_is_ps_enabled(ring))
			cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring);
		else
			cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring);

		work_done += cleaned;
		/* if we didn't clean as many as budgeted, we must be done */
		clean_complete &= (budget_per_ring != cleaned);
	}

	/* If work not completed, return budget and polling will return */
	if (!clean_complete) {
tx_only:
		if (arm_wb) {
			q_vector->tx.ring[0].tx_stats.tx_force_wb++;
			i40e_enable_wb_on_itr(vsi, q_vector);
		}
		return budget;
	}

	if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
		q_vector->arm_wb_state = false;

	/* Work is done so exit the polling mode and re-enable the interrupt */
	napi_complete_done(napi, work_done);
	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
		i40e_update_enable_itr(vsi, q_vector);
	} else { /* Legacy mode */
		struct i40e_hw *hw = &vsi->back->hw;
		/* We re-enable the queue 0 cause, but
		 * don't worry about dynamic_enable
		 * because we left it on for the other
		 * possible interrupts during napi
		 */
		u32 qval = rd32(hw, I40E_QINT_RQCTL(0)) |
			   I40E_QINT_RQCTL_CAUSE_ENA_MASK;

		wr32(hw, I40E_QINT_RQCTL(0), qval);
		qval = rd32(hw, I40E_QINT_TQCTL(0)) |
		       I40E_QINT_TQCTL_CAUSE_ENA_MASK;
		wr32(hw, I40E_QINT_TQCTL(0), qval);
		i40e_irq_dynamic_enable_icr0(vsi->back, false);
	}
	return 0;
}
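
/* Illustrative sketch (editor's example, not part of the upstream driver):
 * the Rx budget split used in i40e_napi_poll() above.  A budget of 64
 * across 3 ring pairs gives each ring 21 packets; the max() keeps the
 * per-ring budget from ever reaching 0, which would end polling early.
 */
static inline int i40e_example_budget_split(int budget, int num_ringpairs)
{
	return max(budget / num_ringpairs, 1);
}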
/**
 * i40e_atr - Add a Flow Director ATR filter
 * @tx_ring:  ring to add programming descriptor to
 * @skb:      send buffer
 * @tx_flags: send tx flags
 * @protocol: wire protocol
 **/
static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
		     u32 tx_flags, __be16 protocol)
{
	struct i40e_filter_program_desc *fdir_desc;
	struct i40e_pf *pf = tx_ring->vsi->back;
	union {
		unsigned char *network;
		struct iphdr *ipv4;
		struct ipv6hdr *ipv6;
	} hdr;
	struct tcphdr *th;
	unsigned int hlen;
	u32 flex_ptype, dtype_cmd;
	u16 i;

	/* make sure ATR is enabled */
	if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
		return;

	if ((pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
		return;

	/* if sampling is disabled do nothing */
	if (!tx_ring->atr_sample_rate)
		return;

	if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
		return;

	if (!(tx_flags & I40E_TX_FLAGS_UDP_TUNNEL)) {
		/* snag network header to get L4 type and address */
		hdr.network = skb_network_header(skb);

		/* Currently only IPv4/IPv6 with TCP is supported
		 * access ihl as u8 to avoid unaligned access on ia64
		 */
		if (tx_flags & I40E_TX_FLAGS_IPV4)
			hlen = (hdr.network[0] & 0x0F) << 2;
		else if (protocol == htons(ETH_P_IPV6))
			hlen = sizeof(struct ipv6hdr);
		else
			return;
	} else {
		hdr.network = skb_inner_network_header(skb);
		hlen = skb_inner_network_header_len(skb);
	}

	/* Currently only IPv4/IPv6 with TCP is supported
	 * Note: tx_flags gets modified to reflect inner protocols in
	 * tx_enable_csum function if encap is enabled.
	 */
	if ((tx_flags & I40E_TX_FLAGS_IPV4) &&
	    (hdr.ipv4->protocol != IPPROTO_TCP))
		return;
	else if ((tx_flags & I40E_TX_FLAGS_IPV6) &&
		 (hdr.ipv6->nexthdr != IPPROTO_TCP))
		return;

	th = (struct tcphdr *)(hdr.network + hlen);

	/* Due to lack of space, no more new filters can be programmed */
	if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
		return;
	if ((pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) &&
	    (!(pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE))) {
		/* HW ATR eviction will take care of removing filters on FIN
		 * and RST packets.
		 */
		if (th->fin || th->rst)
			return;
	}

	tx_ring->atr_count++;

	/* sample on all syn/fin/rst packets or once every atr sample rate */
	if (!th->fin &&
	    !th->syn &&
	    !th->rst &&
	    (tx_ring->atr_count < tx_ring->atr_sample_rate))
		return;

	tx_ring->atr_count = 0;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
		      I40E_TXD_FLTR_QW0_QINDEX_MASK;
	flex_ptype |= (protocol == htons(ETH_P_IP)) ?
		      (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
		       I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
		      (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
		       I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);

	flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;

	dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;

	dtype_cmd |= (th->fin || th->rst) ?
		     (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
		      I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
		     (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
		      I40E_TXD_FLTR_QW1_PCMD_SHIFT);

	dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
		     I40E_TXD_FLTR_QW1_DEST_SHIFT;

	dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
		     I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;

	dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
	if (!(tx_flags & I40E_TX_FLAGS_UDP_TUNNEL))
		dtype_cmd |=
			((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
			 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
			I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
	else
		dtype_cmd |=
			((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
			 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
			I40E_TXD_FLTR_QW1_CNTINDEX_MASK;

	if ((pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) &&
	    (!(pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)))
		dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;

	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
	fdir_desc->rsvd = cpu_to_le32(0);
	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
	fdir_desc->fd_id = cpu_to_le32(0);
}
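
/* Example (illustrative values, macro names abbreviated): an IPv4/TCP
 * data packet sampled on queue 3 of VSI 1 packs
 *
 *	flex_ptype = (3 << QINDEX_SHIFT) |
 *		     (I40E_FILTER_PCTYPE_NONF_IPV4_TCP << PCTYPE_SHIFT) |
 *		     (1 << DEST_VSI_SHIFT);
 *
 * with PCMD_ADD_UPDATE in dtype_cmd, while a FIN or RST on the same flow
 * carries PCMD_REMOVE so the learned filter is torn down at connection
 * close (unless HW ATR eviction is active and already handles it).
 */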
/**
 * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
 * @skb:     send buffer
 * @tx_ring: ring to send buffer on
 * @flags:   the tx flags to be set
 *
 * Checks the skb and set up correspondingly several generic transmit flags
 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
 *
 * Returns an error code to indicate the frame should be dropped on error,
 * otherwise returns 0 to indicate the flags have been set properly.
 **/
#ifdef I40E_FCOE
inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
				      struct i40e_ring *tx_ring,
				      u32 *flags)
#else
static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
					     struct i40e_ring *tx_ring,
					     u32 *flags)
#endif
{
	__be16 protocol = skb->protocol;
	u32 tx_flags = 0;

	if (protocol == htons(ETH_P_8021Q) &&
	    !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
		/* When HW VLAN acceleration is turned off by the user the
		 * stack sets the protocol to 8021q so that the driver
		 * can take any steps required to support the SW only
		 * VLAN handling. In our case the driver doesn't need
		 * to take any further steps so just set the protocol
		 * to the encapsulated ethertype.
		 */
		skb->protocol = vlan_get_protocol(skb);
		goto out;
	}

	/* if we have a HW VLAN tag being added, default to the HW one */
	if (skb_vlan_tag_present(skb)) {
		tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= I40E_TX_FLAGS_HW_VLAN;
	/* else if it is a SW VLAN, check the next protocol and store the tag */
	} else if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_hdr *vhdr, _vhdr;

		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
		if (!vhdr)
			return -EINVAL;

		protocol = vhdr->h_vlan_encapsulated_proto;
		tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= I40E_TX_FLAGS_SW_VLAN;
	}

	if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED))
		goto out;

	/* Insert 802.1p priority into VLAN header */
	if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
	    (skb->priority != TC_PRIO_CONTROL)) {
		tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
		tx_flags |= (skb->priority & 0x7) <<
			    I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
		if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
			struct vlan_ethhdr *vhdr;
			int rc;

			rc = skb_cow_head(skb, 0);
			if (rc < 0)
				return rc;
			vhdr = (struct vlan_ethhdr *)skb->data;
			vhdr->h_vlan_TCI = htons(tx_flags >>
						 I40E_TX_FLAGS_VLAN_SHIFT);
		} else {
			tx_flags |= I40E_TX_FLAGS_HW_VLAN;
		}
	}

out:
	*flags = tx_flags;
	return 0;
}
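
/* Example (illustrative): for a HW-accelerated tag with VLAN ID 100 and
 * skb->priority 5 under DCB, the 16-bit tag carried in tx_flags becomes
 * (5 << 13) | 100 = 0xA064, i.e. the PCP in the top three bits and the
 * VID in the low twelve - exactly what i40e_tx_map() later copies into
 * the data descriptor's L2TAG1 field.
 */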
/**
 * i40e_tso - set up the tso context descriptor
 * @tx_ring:  ptr to the ring to send
 * @skb:      ptr to the skb we're sending
 * @hdr_len:  ptr to the size of the packet header
 * @cd_type_cmd_tso_mss: Quad Word 1
 *
 * Returns 0 if no TSO can happen, 1 if tso is going, or error
 **/
static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
		    u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
{
	u32 cd_cmd, cd_tso_len, cd_mss;
	struct ipv6hdr *ipv6h;
	struct tcphdr *tcph;
	struct iphdr *iph;
	u32 l4len;
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
	ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);

	if (iph->version == 4) {
		tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
						 0, IPPROTO_TCP, 0);
	} else if (ipv6h->version == 6) {
		tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
		ipv6h->payload_len = 0;
		tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
					       0, IPPROTO_TCP, 0);
	}

	l4len = skb->encapsulation ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb);
	*hdr_len = (skb->encapsulation
		    ? (skb_inner_transport_header(skb) - skb->data)
		    : skb_transport_offset(skb)) + l4len;

	/* find the field values */
	cd_cmd = I40E_TX_CTX_DESC_TSO;
	cd_tso_len = skb->len - *hdr_len;
	cd_mss = skb_shinfo(skb)->gso_size;
	*cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
				((u64)cd_tso_len <<
				 I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
				((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
	return 1;
}
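
/* Worked example (illustrative): a 2974-byte skb with a 54-byte
 * Ethernet+IPv4+TCP header and gso_size 1460 programs
 * cd_tso_len = 2974 - 54 = 2920 and cd_mss = 1460, so the hardware
 * emits two wire segments, replicating the 54-byte header on each and
 * completing the pseudo-header checksum seeded above.
 */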
/**
 * i40e_tsyn - set up the tsyn context descriptor
 * @tx_ring:  ptr to the ring to send
 * @skb:      ptr to the skb we're sending
 * @tx_flags: the collected send information
 * @cd_type_cmd_tso_mss: Quad Word 1
 *
 * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
 **/
static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
		     u32 tx_flags, u64 *cd_type_cmd_tso_mss)
{
	struct i40e_pf *pf;

	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
		return 0;

	/* Tx timestamps cannot be sampled when doing TSO */
	if (tx_flags & I40E_TX_FLAGS_TSO)
		return 0;

	/* only timestamp the outbound packet if the user has requested it and
	 * we are not already transmitting a packet to be timestamped
	 */
	pf = i40e_netdev_to_pf(tx_ring->netdev);
	if (!(pf->flags & I40E_FLAG_PTP))
		return 0;

	if (pf->ptp_tx &&
	    !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, &pf->state)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		pf->ptp_tx_skb = skb_get(skb);
	} else {
		return 0;
	}

	*cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
				I40E_TXD_CTX_QW1_CMD_SHIFT;

	return 1;
}
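
/* Note: only one Tx timestamp may be in flight at a time. The
 * __I40E_PTP_TX_IN_PROGRESS bit taken above is not released until the
 * PTP code has retrieved the latched timestamp for pf->ptp_tx_skb, so
 * any further timestamp requests arriving in the meantime fall through
 * the else branch and are transmitted untimestamped.
 */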
/**
 * i40e_tx_enable_csum - Enable Tx checksum offloads
 * @skb: send buffer
 * @tx_flags: pointer to Tx flags currently set
 * @td_cmd: Tx descriptor command bits to set
 * @td_offset: Tx descriptor header offsets to set
 * @tx_ring: Tx descriptor ring
 * @cd_tunneling: ptr to context desc bits
 **/
static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
				u32 *td_cmd, u32 *td_offset,
				struct i40e_ring *tx_ring,
				u32 *cd_tunneling)
{
	struct ipv6hdr *this_ipv6_hdr;
	unsigned int this_tcp_hdrlen;
	struct iphdr *this_ip_hdr;
	u32 network_hdr_len;
	u8 l4_hdr = 0;
	struct udphdr *oudph = NULL;
	struct iphdr *oiph = NULL;
	u32 l4_tunnel = 0;

	if (skb->encapsulation) {
		switch (ip_hdr(skb)->protocol) {
		case IPPROTO_UDP:
			oudph = udp_hdr(skb);
			oiph = ip_hdr(skb);
			l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
			*tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
			break;
		case IPPROTO_GRE:
			l4_tunnel = I40E_TXD_CTX_GRE_TUNNELING;
			break;
		default:
			return;
		}
		network_hdr_len = skb_inner_network_header_len(skb);
		this_ip_hdr = inner_ip_hdr(skb);
		this_ipv6_hdr = inner_ipv6_hdr(skb);
		this_tcp_hdrlen = inner_tcp_hdrlen(skb);

		if (*tx_flags & I40E_TX_FLAGS_IPV4) {
			if (*tx_flags & I40E_TX_FLAGS_TSO) {
				*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
				ip_hdr(skb)->check = 0;
			} else {
				*cd_tunneling |=
					I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
			}
		} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
			*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
			if (*tx_flags & I40E_TX_FLAGS_TSO)
				ip_hdr(skb)->check = 0;
		}

		/* Now set the ctx descriptor fields */
		*cd_tunneling |= (skb_network_header_len(skb) >> 2) <<
				 I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT |
				 l4_tunnel |
				 ((skb_inner_network_offset(skb) -
				   skb_transport_offset(skb)) >> 1) <<
				 I40E_TXD_CTX_QW0_NATLEN_SHIFT;
		if (this_ip_hdr->version == 6) {
			*tx_flags &= ~I40E_TX_FLAGS_IPV4;
			*tx_flags |= I40E_TX_FLAGS_IPV6;
		}
		if ((tx_ring->flags & I40E_TXR_FLAGS_OUTER_UDP_CSUM) &&
		    (l4_tunnel == I40E_TXD_CTX_UDP_TUNNELING) &&
		    (*cd_tunneling & I40E_TXD_CTX_QW0_EXT_IP_MASK)) {
			oudph->check = ~csum_tcpudp_magic(oiph->saddr,
					oiph->daddr,
					(skb->len - skb_transport_offset(skb)),
					IPPROTO_UDP, 0);
			*cd_tunneling |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
		}
	} else {
		network_hdr_len = skb_network_header_len(skb);
		this_ip_hdr = ip_hdr(skb);
		this_ipv6_hdr = ipv6_hdr(skb);
		this_tcp_hdrlen = tcp_hdrlen(skb);
	}

	/* Enable IP checksum offloads */
	if (*tx_flags & I40E_TX_FLAGS_IPV4) {
		l4_hdr = this_ip_hdr->protocol;
		/* the stack computes the IP header already, the only time we
		 * need the hardware to recompute it is in the case of TSO.
		 */
		if (*tx_flags & I40E_TX_FLAGS_TSO) {
			*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
			this_ip_hdr->check = 0;
		} else {
			*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
		}
		/* Now set the td_offset for IP header length */
		*td_offset = (network_hdr_len >> 2) <<
			     I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
	} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
		l4_hdr = this_ipv6_hdr->nexthdr;
		*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
		/* Now set the td_offset for IP header length */
		*td_offset = (network_hdr_len >> 2) <<
			     I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
	}
	/* words in MACLEN + dwords in IPLEN + dwords in L4Len */
	*td_offset |= (skb_network_offset(skb) >> 1) <<
		      I40E_TX_DESC_LENGTH_MACLEN_SHIFT;

	/* Enable L4 checksum offloads */
	switch (l4_hdr) {
	case IPPROTO_TCP:
		/* enable checksum offloads */
		*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
		*td_offset |= (this_tcp_hdrlen >> 2) <<
			      I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	case IPPROTO_SCTP:
		/* enable SCTP checksum offload */
		*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
		*td_offset |= (sizeof(struct sctphdr) >> 2) <<
			      I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	case IPPROTO_UDP:
		/* enable UDP checksum offload */
		*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
		*td_offset |= (sizeof(struct udphdr) >> 2) <<
			      I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	default:
		break;
	}
}
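
/* Worked example (illustrative): for an untagged IPv4/TCP frame with no
 * options, the offsets above pack as
 *
 *	MACLEN = 14 bytes >> 1 = 7 words
 *	IPLEN  = 20 bytes >> 2 = 5 dwords
 *	L4LEN  = 20 bytes >> 2 = 5 dwords
 *
 * matching the "words in MACLEN + dwords in IPLEN + dwords in L4Len"
 * encoding the hardware expects in the data descriptor.
 */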
/**
 * i40e_create_tx_ctx - Build the Tx context descriptor
 * @tx_ring:  ring to create the descriptor on
 * @cd_type_cmd_tso_mss: Quad Word 1
 * @cd_tunneling: Quad Word 0 - bits 0-31
 * @cd_l2tag2: Quad Word 0 - bits 32-63
 **/
static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
			       const u64 cd_type_cmd_tso_mss,
			       const u32 cd_tunneling, const u32 cd_l2tag2)
{
	struct i40e_tx_context_desc *context_desc;
	int i = tx_ring->next_to_use;

	if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
	    !cd_tunneling && !cd_l2tag2)
		return;

	/* grab the next descriptor */
	context_desc = I40E_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* cpu_to_le32 and assign to struct fields */
	context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
	context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
	context_desc->rsvd = cpu_to_le16(0);
	context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
}
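
/* A context descriptor is only consumed when something must travel out
 * of band with the packet (TSO parameters, tunneling fields, L2TAG2):
 * the early return above means a plain, non-TSO, untunneled frame costs
 * no extra ring slot here.
 */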
/**
 * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
 * @tx_ring: the ring to be checked
 * @size:    the size buffer we want to assure is available
 *
 * Returns -EBUSY if a stop is needed, else 0
 **/
static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
	/* Memory barrier before checking head and tail */
	smp_mb();

	/* Check again in a case another CPU has just made room available. */
	if (likely(I40E_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;
	return 0;
}
/**
 * i40e_maybe_stop_tx - 1st level check for tx stop conditions
 * @tx_ring: the ring to be checked
 * @size:    the size buffer we want to assure is available
 *
 * Returns 0 if stop is not needed
 **/
#ifdef I40E_FCOE
inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
#else
static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
#endif
{
	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __i40e_maybe_stop_tx(tx_ring, size);
}
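
/* The two-level split keeps the hot path cheap: the inline check above
 * costs one read of the ring counters, while the unlikely slow path in
 * __i40e_maybe_stop_tx() stops the subqueue first and re-checks after an
 * smp_mb(), so a concurrent Tx cleanup cannot slip a wakeup in between
 * the check and the stop (the classic lost-wakeup race).
 */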
/**
 * i40e_chk_linearize - Check if there are more than 8 fragments per packet
 * @skb:      send buffer
 * @tx_flags: collected send information
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire and so we need to figure out the cases where we
 * need to linearize the skb.
 **/
static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags)
{
	struct skb_frag_struct *frag;
	bool linearize = false;
	unsigned int size = 0;
	u16 num_frags;
	u16 gso_segs;

	num_frags = skb_shinfo(skb)->nr_frags;
	gso_segs = skb_shinfo(skb)->gso_segs;

	if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
		u16 j = 0;

		if (num_frags < (I40E_MAX_BUFFER_TXD))
			goto linearize_chk_done;
		/* try the simple math, if we have too many frags per segment */
		if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) >
		    I40E_MAX_BUFFER_TXD) {
			linearize = true;
			goto linearize_chk_done;
		}
		frag = &skb_shinfo(skb)->frags[0];
		/* we might still have more fragments per segment */
		do {
			size += skb_frag_size(frag);
			frag++; j++;
			if ((size >= skb_shinfo(skb)->gso_size) &&
			    (j < I40E_MAX_BUFFER_TXD)) {
				size = (size % skb_shinfo(skb)->gso_size);
				j = (size) ? 1 : 0;
			}
			if (j == I40E_MAX_BUFFER_TXD) {
				linearize = true;
				break;
			}
			num_frags--;
		} while (num_frags);
	} else {
		if (num_frags >= I40E_MAX_BUFFER_TXD)
			linearize = true;
	}

linearize_chk_done:
	return linearize;
}
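
/* Worked examples (illustrative, assuming I40E_MAX_BUFFER_TXD == 8):
 * a non-TSO skb with 8 fragments hits the else branch and is linearized
 * outright. A TSO skb with 16 fragments and 4 segments passes the quick
 * check (DIV_ROUND_UP(16 + 4, 4) = 5 <= 8) but still walks the fragment
 * list above, since an uneven fragment-to-segment spread could put more
 * than 8 buffers into a single wire segment.
 */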
/**
 * i40e_tx_map - Build the Tx descriptor
 * @tx_ring:  ring to send buffer on
 * @skb:      send buffer
 * @first:    first buffer info buffer to use
 * @tx_flags: collected send information
 * @hdr_len:  size of the packet header
 * @td_cmd:   the command field in the descriptor
 * @td_offset: offset for checksum or crc
 **/
#ifdef I40E_FCOE
inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
			struct i40e_tx_buffer *first, u32 tx_flags,
			const u8 hdr_len, u32 td_cmd, u32 td_offset)
#else
static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
			       struct i40e_tx_buffer *first, u32 tx_flags,
			       const u8 hdr_len, u32 td_cmd, u32 td_offset)
#endif
{
	unsigned int data_len = skb->data_len;
	unsigned int size = skb_headlen(skb);
	struct skb_frag_struct *frag;
	struct i40e_tx_buffer *tx_bi;
	struct i40e_tx_desc *tx_desc;
	u16 i = tx_ring->next_to_use;
	u32 td_tag = 0;
	dma_addr_t dma;
	u16 gso_segs;
	u16 desc_count = 0;
	bool tail_bump = true;
	bool do_rs = false;

	if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
		td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
		td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
			 I40E_TX_FLAGS_VLAN_SHIFT;
	}

	if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
		gso_segs = skb_shinfo(skb)->gso_segs;
	else
		gso_segs = 1;

	/* multiply data chunks by size of headers */
	first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len);
	first->gso_segs = gso_segs;
	first->skb = skb;
	first->tx_flags = tx_flags;

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_desc = I40E_TX_DESC(tx_ring, i);
	tx_bi = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_bi, len, size);
		dma_unmap_addr_set(tx_bi, dma, dma);

		tx_desc->buffer_addr = cpu_to_le64(dma);

		while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
			tx_desc->cmd_type_offset_bsz =
				build_ctob(td_cmd, td_offset,
					   I40E_MAX_DATA_PER_TXD, td_tag);

			tx_desc++;
			i++;
			desc_count++;

			if (i == tx_ring->count) {
				tx_desc = I40E_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += I40E_MAX_DATA_PER_TXD;
			size -= I40E_MAX_DATA_PER_TXD;

			tx_desc->buffer_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
							  size, td_tag);

		tx_desc++;
		i++;
		desc_count++;

		if (i == tx_ring->count) {
			tx_desc = I40E_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);

		tx_bi = &tx_ring->tx_bi[i];
	}

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
						 tx_ring->queue_index),
			     first->bytecount);
	i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);

	/* Algorithm to optimize tail and RS bit setting:
	 * if xmit_more is supported
	 *	if xmit_more is true
	 *		do not update tail and do not mark RS bit.
	 *	if xmit_more is false and last xmit_more was false
	 *		if every packet spanned less than 4 desc
	 *			then set RS bit on 4th packet and update tail
	 *			on every packet
	 *		else
	 *			update tail and set RS bit on every packet.
	 *	if xmit_more is false and last_xmit_more was true
	 *		update tail and set RS bit.
	 *
	 * Optimization: wmb to be issued only in case of tail update.
	 * Also optimize the Descriptor WB path for RS bit with the same
	 * algorithm.
	 *
	 * Note: If there are less than 4 packets
	 * pending and interrupts were disabled the service task will
	 * trigger a force WB.
	 */
	if (skb->xmit_more &&
	    !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
						    tx_ring->queue_index))) {
		tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
		tail_bump = false;
	} else if (!skb->xmit_more &&
		   !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
						    tx_ring->queue_index)) &&
		   (!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) &&
		   (tx_ring->packet_stride < WB_STRIDE) &&
		   (desc_count < WB_STRIDE)) {
		tx_ring->packet_stride++;
	} else {
		tx_ring->packet_stride = 0;
		tx_ring->flags &= ~I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
		do_rs = true;
	}
	if (do_rs)
		tx_ring->packet_stride = 0;

	tx_desc->cmd_type_offset_bsz =
			build_ctob(td_cmd, td_offset, size, td_tag) |
			cpu_to_le64((u64)(do_rs ? I40E_TXD_CMD :
						  I40E_TX_DESC_CMD_EOP) <<
						  I40E_TXD_QW1_CMD_SHIFT);

	/* notify HW of packet */
	if (!tail_bump)
		prefetchw(tx_desc + 1);

	if (tail_bump) {
		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, tx_ring->tail);
	}

	return;

dma_error:
	dev_info(tx_ring->dev, "TX DMA map failed\n");

	/* clear dma mappings for failed tx_bi map */
	for (;;) {
		tx_bi = &tx_ring->tx_bi[i];
		i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
		if (tx_bi == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}
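
/* Example (illustrative): under a pktgen-style burst with xmit_more set
 * on all but the last skb, only the final descriptor write is followed
 * by wmb() + writel(), so one MMIO doorbell covers the whole burst; the
 * deferred RS bit likewise batches descriptor write-backs to roughly
 * once per WB_STRIDE packets instead of once per packet.
 */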
/**
 * i40e_xmit_descriptor_count - calculate number of tx descriptors needed
 * @skb:     send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns number of data descriptors needed for this skb. Returns 0 to
 * indicate there are not enough descriptors available in this ring, since
 * we need at least one descriptor.
 **/
#ifdef I40E_FCOE
inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
				      struct i40e_ring *tx_ring)
#else
static inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
					     struct i40e_ring *tx_ring)
#endif
{
	unsigned int f;
	int count = 0;

	/* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
	 *       + 4 desc gap to avoid the cache line where head is,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);

	count += TXD_USE_COUNT(skb_headlen(skb));
	if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
		tx_ring->tx_stats.tx_busy++;
		return 0;
	}
	return count;
}
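
/* Worked example (illustrative, assuming each 4K page fragment fits in
 * one data descriptor): a 128-byte head plus two page fragments needs
 * count = 3, and the i40e_maybe_stop_tx() call above then requires
 * 3 + 4 + 1 = 8 free descriptors before the frame is accepted.
 */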
/**
 * i40e_xmit_frame_ring - Sends buffer on Tx ring
 * @skb:     send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/
static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
					struct i40e_ring *tx_ring)
{
	u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
	u32 cd_tunneling = 0, cd_l2tag2 = 0;
	struct i40e_tx_buffer *first;
	u32 td_offset = 0;
	u32 tx_flags = 0;
	__be16 protocol;
	u32 td_cmd = 0;
	u8 hdr_len = 0;
	int tsyn;
	int tso;

	/* prefetch the data, we'll need it later */
	prefetch(skb->data);

	if (0 == i40e_xmit_descriptor_count(skb, tx_ring))
		return NETDEV_TX_BUSY;

	/* prepare the xmit flags */
	if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
		goto out_drop;

	/* obtain protocol of skb */
	protocol = vlan_get_protocol(skb);

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_bi[tx_ring->next_to_use];

	/* setup IPv4/IPv6 offloads */
	if (protocol == htons(ETH_P_IP))
		tx_flags |= I40E_TX_FLAGS_IPV4;
	else if (protocol == htons(ETH_P_IPV6))
		tx_flags |= I40E_TX_FLAGS_IPV6;

	tso = i40e_tso(tx_ring, skb, &hdr_len, &cd_type_cmd_tso_mss);

	if (tso < 0)
		goto out_drop;
	else if (tso)
		tx_flags |= I40E_TX_FLAGS_TSO;

	tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);

	if (tsyn)
		tx_flags |= I40E_TX_FLAGS_TSYN;

	if (i40e_chk_linearize(skb, tx_flags)) {
		if (skb_linearize(skb))
			goto out_drop;
		tx_ring->tx_stats.tx_linearize++;
	}
	skb_tx_timestamp(skb);

	/* always enable CRC insertion offload */
	td_cmd |= I40E_TX_DESC_CMD_ICRC;

	/* Always offload the checksum, since it's in the data descriptor */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		tx_flags |= I40E_TX_FLAGS_CSUM;

		i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
				    tx_ring, &cd_tunneling);
	}

	i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
			   cd_tunneling, cd_l2tag2);

	/* Add Flow Director ATR if it's enabled.
	 *
	 * NOTE: this must always be directly before the data descriptor.
	 */
	i40e_atr(tx_ring, skb, tx_flags, protocol);

	i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
		    td_cmd, td_offset);

	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
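
/* Putting it together (illustrative): a TSO'd frame over a UDP tunnel
 * consumes one context descriptor (TSO + tunneling fields), possibly one
 * ATR programming descriptor queued directly before the data, and then
 * the data descriptors built by i40e_tx_map() - which is why the
 * i40e_atr() call above must stay immediately ahead of the mapping step.
 */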
/**
 * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
 * @skb:    send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];

	/* hardware can't handle really short frames, hardware padding works
	 * beyond this point
	 */
	if (skb_put_padto(skb, I40E_MIN_TX_LEN))
		return NETDEV_TX_OK;

	return i40e_xmit_frame_ring(skb, tx_ring);
}