/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include <linux/prefetch.h>
#include <net/busy_poll.h>
#include "i40e.h"
#include "i40e_prototype.h"

static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
				u32 td_tag)
{
	return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
			   ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
			   ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
			   ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
			   ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
}

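/* Example (illustrative): build_ctob(I40E_TXD_CMD, 0, 256, 0) yields a
 * data descriptor qword with EOP and RS set, no header offsets, a
 * 256 byte buffer size and no L2 tag.
 */
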
#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
#define I40E_FD_CLEAN_DELAY 10

/**
 * i40e_program_fdir_filter - Program a Flow Director filter
 * @fdir_data: Packet data that will be filter parameters
 * @raw_packet: the pre-allocated packet buffer for FDir
 * @pf: The PF pointer
 * @add: True for add/update, False for remove
 **/
int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
			     struct i40e_pf *pf, bool add)
{
	struct i40e_filter_program_desc *fdir_desc;
	struct i40e_tx_buffer *tx_buf, *first;
	struct i40e_tx_desc *tx_desc;
	struct i40e_ring *tx_ring;
	unsigned int fpt, dcc;
	struct i40e_vsi *vsi;
	struct device *dev;
	dma_addr_t dma;
	u32 td_cmd = 0;
	u16 delay = 0;
	u16 i;

	/* find existing FDIR VSI */
	vsi = NULL;
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
			vsi = pf->vsi[i];
	if (!vsi)
		return -ENOENT;

	tx_ring = vsi->tx_rings[0];
	dev = tx_ring->dev;

	/* we need two descriptors to add/del a filter and we can wait */
	do {
		if (I40E_DESC_UNUSED(tx_ring) > 1)
			break;
		msleep_interruptible(1);
		delay++;
	} while (delay < I40E_FD_CLEAN_DELAY);

	if (!(I40E_DESC_UNUSED(tx_ring) > 1))
		return -EAGAIN;
	dma = dma_map_single(dev, raw_packet,
			     I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		goto dma_fail;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
	first = &tx_ring->tx_bi[i];
	memset(first, 0, sizeof(struct i40e_tx_buffer));

	tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;
	fpt = (fdir_data->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
	      I40E_TXD_FLTR_QW0_QINDEX_MASK;

	fpt |= (fdir_data->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
	       I40E_TXD_FLTR_QW0_FLEXOFF_MASK;

	fpt |= (fdir_data->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
	       I40E_TXD_FLTR_QW0_PCTYPE_MASK;

	/* Use LAN VSI Id if not programmed by user */
	if (fdir_data->dest_vsi == 0)
		fpt |= (pf->vsi[pf->lan_vsi]->id) <<
		       I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
	else
		fpt |= ((u32)fdir_data->dest_vsi <<
			I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
		       I40E_TXD_FLTR_QW0_DEST_VSI_MASK;
	dcc = I40E_TX_DESC_DTYPE_FILTER_PROG;

	if (add)
		dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
		       I40E_TXD_FLTR_QW1_PCMD_SHIFT;
	else
		dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
		       I40E_TXD_FLTR_QW1_PCMD_SHIFT;

	dcc |= (fdir_data->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT) &
	       I40E_TXD_FLTR_QW1_DEST_MASK;

	dcc |= (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
	       I40E_TXD_FLTR_QW1_FD_STATUS_MASK;

	if (fdir_data->cnt_index != 0) {
		dcc |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
		dcc |= ((u32)fdir_data->cnt_index <<
			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
		       I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
	}

	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(fpt);
	fdir_desc->rsvd = cpu_to_le32(0);
	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc);
	fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id);
	/* Now program a dummy descriptor */
	i = tx_ring->next_to_use;
	tx_desc = I40E_TX_DESC(tx_ring, i);
	tx_buf = &tx_ring->tx_bi[i];

	tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;

	memset(tx_buf, 0, sizeof(struct i40e_tx_buffer));

	/* record length, and DMA address */
	dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
	dma_unmap_addr_set(tx_buf, dma, dma);

	tx_desc->buffer_addr = cpu_to_le64(dma);
	td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;

	tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB;
	tx_buf->raw_buf = (void *)raw_packet;

	tx_desc->cmd_type_offset_bsz =
		build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.
	 */
	wmb();

	/* Mark the data descriptor to be watched */
	first->next_to_watch = tx_desc;

	writel(tx_ring->next_to_use, tx_ring->tail);
	return 0;

dma_fail:
	return -1;
}

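/* Note: programming one sideband rule consumes two ring entries: the
 * filter program descriptor above plus a dummy data descriptor carrying
 * the raw packet that the hardware parses for the match fields.
 */
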
#define IP_HEADER_OFFSET 14
#define I40E_UDPIP_DUMMY_PACKET_LEN 42

/**
 * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_filter *fd_data,
				   bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct udphdr *udp;
	struct iphdr *ip;
	bool err = false;
	u8 *raw_packet;
	int ret;
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};

	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
	      + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip[0];
	udp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip[0];
	udp->source = fd_data->src_port;

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		err = true;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev,
				 "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}
	if (err)
		kfree(raw_packet);

	return err ? -EOPNOTSUPP : 0;
}

#define I40E_TCPIP_DUMMY_PACKET_LEN 54
/**
 * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_filter *fd_data,
				   bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct tcphdr *tcp;
	struct iphdr *ip;
	bool err = false;
	u8 *raw_packet;
	int ret;
	/* Dummy packet */
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
		0x0, 0x72, 0, 0, 0, 0};

	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
	      + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip[0];
	tcp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip[0];
	tcp->source = fd_data->src_port;

	if (add) {
		pf->fd_tcp_rule++;
		if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
			if (I40E_DEBUG_FD & pf->hw.debug_mask)
				dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
			pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
		}
	} else {
		pf->fd_tcp_rule = (pf->fd_tcp_rule > 0) ?
				  (pf->fd_tcp_rule - 1) : 0;
		if (pf->fd_tcp_rule == 0) {
			pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
			if (I40E_DEBUG_FD & pf->hw.debug_mask)
				dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
		}
	}

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);

	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		err = true;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev,
				 "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}

	if (err)
		kfree(raw_packet);

	return err ? -EOPNOTSUPP : 0;
}

/**
 * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
				    struct i40e_fdir_filter *fd_data,
				    bool add)
{
	return -EOPNOTSUPP;
}

#define I40E_IP_DUMMY_PACKET_LEN 34
/**
 * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
				  struct i40e_fdir_filter *fd_data,
				  bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct iphdr *ip;
	bool err = false;
	u8 *raw_packet;
	int ret;
	int i;
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0};

	for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
	     i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
		raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
		if (!raw_packet)
			return -ENOMEM;
		memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
		ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);

		ip->saddr = fd_data->src_ip[0];
		ip->daddr = fd_data->dst_ip[0];
		ip->protocol = 0;

		fd_data->pctype = i;
		ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);

		if (ret) {
			dev_info(&pf->pdev->dev,
				 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
				 fd_data->pctype, fd_data->fd_id, ret);
			err = true;
		} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
			if (add)
				dev_info(&pf->pdev->dev,
					 "Filter OK for PCTYPE %d loc = %d\n",
					 fd_data->pctype, fd_data->fd_id);
			else
				dev_info(&pf->pdev->dev,
					 "Filter deleted for PCTYPE %d loc = %d\n",
					 fd_data->pctype, fd_data->fd_id);
		}
	}

	if (err)
		kfree(raw_packet);

	return err ? -EOPNOTSUPP : 0;
}

/**
 * i40e_add_del_fdir - Build raw packets to add/del fdir filter
 * @vsi: pointer to the targeted VSI
 * @input: the filter to be added or deleted
 * @add: true adds a filter, false removes it
 *
 **/
int i40e_add_del_fdir(struct i40e_vsi *vsi,
		      struct i40e_fdir_filter *input, bool add)
{
	struct i40e_pf *pf = vsi->back;
	int ret;

	switch (input->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
		break;
	case UDP_V4_FLOW:
		ret = i40e_add_del_fdir_udpv4(vsi, input, add);
		break;
	case SCTP_V4_FLOW:
		ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
		break;
	case IPV4_FLOW:
		ret = i40e_add_del_fdir_ipv4(vsi, input, add);
		break;
	case IP_USER_FLOW:
		switch (input->ip4_proto) {
		case IPPROTO_TCP:
			ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
			break;
		case IPPROTO_UDP:
			ret = i40e_add_del_fdir_udpv4(vsi, input, add);
			break;
		case IPPROTO_SCTP:
			ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
			break;
		default:
			ret = i40e_add_del_fdir_ipv4(vsi, input, add);
			break;
		}
		break;
	default:
		dev_info(&pf->pdev->dev, "Could not specify spec type %d\n",
			 input->flow_type);
		ret = -EINVAL;
	}

	/* The buffer allocated here is freed by the i40e_clean_tx_ring() */
	return ret;
}

/**
 * i40e_fd_handle_status - check the Programming Status for FD
 * @rx_ring: the Rx ring for this descriptor
 * @rx_desc: the Rx descriptor for programming Status, not a packet descriptor.
 * @prog_id: the id originally used for programming
 *
 * This is used to verify if the FD programming or invalidation
 * requested by SW to the HW is successful or not and take actions accordingly.
 **/
static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
				  union i40e_rx_desc *rx_desc, u8 prog_id)
{
	struct i40e_pf *pf = rx_ring->vsi->back;
	struct pci_dev *pdev = pf->pdev;
	u32 fcnt_prog, fcnt_avail;
	u32 error;
	u64 qw;

	qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
		I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;

	if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
		pf->fd_inv = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fd_id);
		if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
		    (I40E_DEBUG_FD & pf->hw.debug_mask))
			dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
				 pf->fd_inv);

		/* Check if the programming error is for ATR.
		 * If so, auto disable ATR and set a state for
		 * flush in progress. Next time we come here if flush is in
		 * progress do nothing, once flush is complete the state will
		 * be cleared.
		 */
		if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
			return;

		pf->fd_add_err++;
		/* store the current atr filter count */
		pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);

		if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
		    (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
			pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
			set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
		}

		/* filter programming failed most likely due to table full */
		fcnt_prog = i40e_get_global_fd_count(pf);
		fcnt_avail = pf->fdir_pf_filter_count;
		/* If ATR is running fcnt_prog can quickly change,
		 * if we are very close to full, it makes sense to disable
		 * FD ATR/SB and then re-enable it when there is room.
		 */
		if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
			if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
			    !(pf->auto_disable_flags &
			      I40E_FLAG_FD_SB_ENABLED)) {
				if (I40E_DEBUG_FD & pf->hw.debug_mask)
					dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
				pf->auto_disable_flags |=
					I40E_FLAG_FD_SB_ENABLED;
			}
		}
	} else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
				 rx_desc->wb.qword0.hi_dword.fd_id);
	}
}

/**
 * i40e_unmap_and_free_tx_resource - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buffer: the buffer to free
 **/
static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
					    struct i40e_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}

	if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
		kfree(tx_buffer->raw_buf);

	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}

/**
 * i40e_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 **/
void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
{
	unsigned long bi_size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_bi)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);

	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
						  tx_ring->queue_index));
}

/**
 * i40e_free_tx_resources - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void i40e_free_tx_resources(struct i40e_ring *tx_ring)
{
	i40e_clean_tx_ring(tx_ring);
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;

	if (tx_ring->desc) {
		dma_free_coherent(tx_ring->dev, tx_ring->size,
				  tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}

/**
 * i40e_get_tx_pending - how many tx descriptors not processed
 * @ring: the ring of descriptors
 *
 * Since there is no access to the ring head register
 * in XL710, we need to use our local copies
 **/
u32 i40e_get_tx_pending(struct i40e_ring *ring)
{
	u32 head, tail;

	head = i40e_get_head(ring);
	tail = readl(ring->tail);

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);

	return 0;
}

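/* Worked example: with ring->count = 512, head = 500 and tail = 10 the
 * ring has wrapped, so pending = 10 + 512 - 500 = 22 descriptors.
 */
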
#define WB_STRIDE 0x3
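/* With WB_STRIDE 0x3, the (j / (WB_STRIDE + 1)) == 0 test below is simply
 * "fewer than 4 descriptors still pending writeback".
 */
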
/**
 * i40e_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: tx ring to clean
 * @budget: how many cleans we're allowed
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 **/
static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
{
	u16 i = tx_ring->next_to_clean;
	struct i40e_tx_buffer *tx_buf;
	struct i40e_tx_desc *tx_head;
	struct i40e_tx_desc *tx_desc;
	unsigned int total_packets = 0;
	unsigned int total_bytes = 0;

	tx_buf = &tx_ring->tx_bi[i];
	tx_desc = I40E_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));

	do {
		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* we have caught up to head, no work left to do */
		if (tx_head == tx_desc)
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_packets += tx_buf->gso_segs;

		/* free the skb */
		dev_consume_skb_any(tx_buf->skb);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_bi;
				tx_desc = I40E_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = I40E_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	tx_ring->q_vector->tx.total_bytes += total_bytes;
	tx_ring->q_vector->tx.total_packets += total_packets;

	if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
		unsigned int j = 0;

		/* check to see if there are < 4 descriptors
		 * waiting to be written back, then kick the hardware to force
		 * them to be written back in case we stay in NAPI.
		 * In this mode on X722 we do not enable Interrupt.
		 */
		j = i40e_get_tx_pending(tx_ring);

		if (budget &&
		    ((j / (WB_STRIDE + 1)) == 0) && (j != 0) &&
		    !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
		    (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
			tx_ring->arm_wb = true;
	}

	netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
						      tx_ring->queue_index),
				  total_packets, total_bytes);

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}

/**
 * i40e_force_wb - Arm hardware to do a wb on noncache aligned descriptors
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to force writeback
 **/
void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
{
	u16 flags = q_vector->tx.ring[0].flags;

	if (flags & I40E_TXR_FLAGS_WB_ON_ITR) {
		u32 val;

		if (q_vector->arm_wb_state)
			return;

		val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK;

		wr32(&vsi->back->hw,
		     I40E_PFINT_DYN_CTLN(q_vector->v_idx +
					 vsi->base_vector - 1),
		     val);
		q_vector->arm_wb_state = true;
	} else if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
		u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
			  I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
			  I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
			  I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
			  /* allow 00 to be written to the index */

		wr32(&vsi->back->hw,
		     I40E_PFINT_DYN_CTLN(q_vector->v_idx +
					 vsi->base_vector - 1), val);
	} else {
		u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
			  I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
			  I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
			  I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK;
			  /* allow 00 to be written to the index */

		wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
	}
}

/**
 * i40e_set_new_dynamic_itr - Find new ITR level
 * @rc: structure containing ring performance data
 *
 * Returns true if ITR changed, false if not
 *
 * Stores a new ITR value based on packets and byte counts during
 * the last interrupt.  The advantage of per interrupt computation
 * is faster updates and more accurate ITR for the current traffic
 * pattern.  Constants in this function were computed based on
 * theoretical maximum wire speed and thresholds were set based on
 * testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
{
	enum i40e_latency_range new_latency_range = rc->latency_range;
	struct i40e_q_vector *qv = rc->ring->q_vector;
	u32 new_itr = rc->itr;
	int bytes_per_int;
	int usecs;

	if (rc->total_packets == 0 || !rc->itr)
		return false;

	/* simple throttlerate management
	 *   0-10MB/s   lowest (50000 ints/s)
	 *  10-20MB/s   low    (20000 ints/s)
	 *  20-1249MB/s bulk   (18000 ints/s)
	 *  > 40000 Rx packets per second (8000 ints/s)
	 *
	 * The math works out because the divisor is in 10^(-6) which
	 * turns the bytes/us input value into MB/s values, but
	 * make sure to use usecs, as the register values written
	 * are in 2 usec increments in the ITR registers, and make sure
	 * to use the smoothed values that the countdown timer gives us.
	 */
	usecs = (rc->itr << 1) * ITR_COUNTDOWN_START;
	bytes_per_int = rc->total_bytes / usecs;
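	/* Worked example (assuming ITR_COUNTDOWN_START is 100): an ITR of
	 * I40E_ITR_20K is 25 register units of 2 usec, so usecs =
	 * (25 << 1) * 100 = 5000; 100000 bytes in that window gives
	 * bytes_per_int = 20, i.e. roughly 20 MB/s -> the bulk range.
	 */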

	switch (new_latency_range) {
	case I40E_LOWEST_LATENCY:
		if (bytes_per_int > 10)
			new_latency_range = I40E_LOW_LATENCY;
		break;
	case I40E_LOW_LATENCY:
		if (bytes_per_int > 20)
			new_latency_range = I40E_BULK_LATENCY;
		else if (bytes_per_int <= 10)
			new_latency_range = I40E_LOWEST_LATENCY;
		break;
	case I40E_BULK_LATENCY:
	case I40E_ULTRA_LATENCY:
	default:
		if (bytes_per_int <= 20)
			new_latency_range = I40E_LOW_LATENCY;
		break;
	}

	/* this is to adjust RX more aggressively when streaming small
	 * packets.  The value of 40000 was picked as it is just beyond
	 * what the hardware can receive per second if in low latency
	 * mode.
	 */
#define RX_ULTRA_PACKET_RATE 40000

	if ((((rc->total_packets * 1000000) / usecs) > RX_ULTRA_PACKET_RATE) &&
	    (&qv->rx == rc))
		new_latency_range = I40E_ULTRA_LATENCY;

	rc->latency_range = new_latency_range;

	switch (new_latency_range) {
	case I40E_LOWEST_LATENCY:
		new_itr = I40E_ITR_50K;
		break;
	case I40E_LOW_LATENCY:
		new_itr = I40E_ITR_20K;
		break;
	case I40E_BULK_LATENCY:
		new_itr = I40E_ITR_18K;
		break;
	case I40E_ULTRA_LATENCY:
		new_itr = I40E_ITR_8K;
		break;
	default:
		break;
	}

	rc->total_bytes = 0;
	rc->total_packets = 0;

	if (new_itr != rc->itr) {
		rc->itr = new_itr;
		return true;
	}

	return false;
}

/**
 * i40e_clean_programming_status - clean the programming status descriptor
 * @rx_ring: the rx ring that has this descriptor
 * @rx_desc: the rx descriptor written back by HW
 *
 * Flow director should handle FD_FILTER_STATUS to check its filter programming
 * status being successful or not and take actions accordingly. FCoE should
 * handle its context/filter programming/invalidation status and take actions.
 **/
static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
					  union i40e_rx_desc *rx_desc)
{
	u64 qw;
	u8 id;

	qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
	     I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;

	if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
		i40e_fd_handle_status(rx_ring, rx_desc, id);
#ifdef I40E_FCOE
	else if ((id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS) ||
		 (id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS))
		i40e_fcoe_handle_status(rx_ring, rx_desc, id);
#endif
}

/**
 * i40e_setup_tx_descriptors - Allocate the Tx descriptors
 * @tx_ring: the tx ring to set up
 *
 * Return 0 on success, negative on error
 **/
int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int bi_size;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(tx_ring->tx_bi);
	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!tx_ring->tx_bi)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
	/* add u32 for head writeback, align after this takes care of
	 * guaranteeing this is at least one cache line in size
	 */
	tx_ring->size += sizeof(u32);
	tx_ring->size = ALIGN(tx_ring->size, 4096);
	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			 tx_ring->size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	return 0;

err:
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;
	return -ENOMEM;
}

/**
 * i40e_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 **/
void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	struct i40e_rx_buffer *rx_bi;
	unsigned long bi_size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_bi)
		return;

	if (ring_is_ps_enabled(rx_ring)) {
		int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count;

		rx_bi = &rx_ring->rx_bi[0];
		if (rx_bi->hdr_buf) {
			dma_free_coherent(dev,
					  bufsz,
					  rx_bi->hdr_buf,
					  rx_bi->dma);
			for (i = 0; i < rx_ring->count; i++) {
				rx_bi = &rx_ring->rx_bi[i];
				rx_bi->dma = 0;
				rx_bi->hdr_buf = NULL;
			}
		}
	}
	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		rx_bi = &rx_ring->rx_bi[i];
		if (rx_bi->dma) {
			dma_unmap_single(dev,
					 rx_bi->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_bi->dma = 0;
		}
		if (rx_bi->skb) {
			dev_kfree_skb(rx_bi->skb);
			rx_bi->skb = NULL;
		}
		if (rx_bi->page_dma) {
			dma_unmap_page(dev,
				       rx_bi->page_dma,
				       PAGE_SIZE,
				       DMA_FROM_DEVICE);
			rx_bi->page_dma = 0;
		}
		if (rx_bi->page) {
			__free_page(rx_bi->page);
			rx_bi->page = NULL;
			rx_bi->page_offset = 0;
		}
	}

	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * i40e_free_rx_resources - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void i40e_free_rx_resources(struct i40e_ring *rx_ring)
{
	i40e_clean_rx_ring(rx_ring);
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;

	if (rx_ring->desc) {
		dma_free_coherent(rx_ring->dev, rx_ring->size,
				  rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}

/**
 * i40e_alloc_rx_headers - allocate rx header buffers
 * @rx_ring: ring to alloc buffers
 *
 * Allocate rx header buffers for the entire ring. As these are static,
 * this is only called when setting up a new ring.
 **/
void i40e_alloc_rx_headers(struct i40e_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	struct i40e_rx_buffer *rx_bi;
	dma_addr_t dma;
	void *buffer;
	int buf_size;
	int i;

	if (rx_ring->rx_bi[0].hdr_buf)
		return;
	/* Make sure the buffers don't cross cache line boundaries. */
	buf_size = ALIGN(rx_ring->rx_hdr_len, 256);
	buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count,
				    &dma, GFP_KERNEL);
	if (!buffer)
		return;
	for (i = 0; i < rx_ring->count; i++) {
		rx_bi = &rx_ring->rx_bi[i];
		rx_bi->dma = dma + (i * buf_size);
		rx_bi->hdr_buf = buffer + (i * buf_size);
	}
}

/**
 * i40e_setup_rx_descriptors - Allocate Rx descriptors
 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int bi_size;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(rx_ring->rx_bi);
	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
	rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!rx_ring->rx_bi)
		goto err;

	u64_stats_init(&rx_ring->syncp);

	/* Round up to nearest 4K */
	rx_ring->size = ring_is_16byte_desc_enabled(rx_ring)
		? rx_ring->count * sizeof(union i40e_16byte_rx_desc)
		: rx_ring->count * sizeof(union i40e_32byte_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);
	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			 rx_ring->size);
		goto err;
	}

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;
err:
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;
	return -ENOMEM;
}

/**
 * i40e_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 **/
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;
	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}

/**
 * i40e_alloc_rx_buffers_ps - Replace used receive buffers; packet split
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
void i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
{
	u16 i = rx_ring->next_to_use;
	union i40e_rx_desc *rx_desc;
	struct i40e_rx_buffer *bi;

	/* do nothing if no valid netdev defined */
	if (!rx_ring->netdev || !cleaned_count)
		return;

	while (cleaned_count--) {
		rx_desc = I40E_RX_DESC(rx_ring, i);
		bi = &rx_ring->rx_bi[i];

		if (bi->skb) /* desc is in use */
			goto no_buffers;
		if (!bi->page) {
			bi->page = alloc_page(GFP_ATOMIC);
			if (!bi->page) {
				rx_ring->rx_stats.alloc_page_failed++;
				goto no_buffers;
			}
		}

		if (!bi->page_dma) {
			/* use a half page if we're re-using */
			bi->page_offset ^= PAGE_SIZE / 2;
			bi->page_dma = dma_map_page(rx_ring->dev,
						    bi->page,
						    bi->page_offset,
						    PAGE_SIZE / 2,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(rx_ring->dev,
					      bi->page_dma)) {
				rx_ring->rx_stats.alloc_page_failed++;
				bi->page_dma = 0;
				goto no_buffers;
			}
		}

		dma_sync_single_range_for_device(rx_ring->dev,
						 bi->dma,
						 0,
						 rx_ring->rx_hdr_len,
						 DMA_FROM_DEVICE);
		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
		rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
		i++;
		if (i == rx_ring->count)
			i = 0;
	}

no_buffers:
	if (rx_ring->next_to_use != i)
		i40e_release_rx_desc(rx_ring, i);
}

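/* Note: next_to_use is pushed to the hardware tail register only once
 * per refill batch, via i40e_release_rx_desc(), rather than doing one
 * MMIO write per buffer.
 */
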
/**
 * i40e_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
{
	u16 i = rx_ring->next_to_use;
	union i40e_rx_desc *rx_desc;
	struct i40e_rx_buffer *bi;
	struct sk_buff *skb;

	/* do nothing if no valid netdev defined */
	if (!rx_ring->netdev || !cleaned_count)
		return;

	while (cleaned_count--) {
		rx_desc = I40E_RX_DESC(rx_ring, i);
		bi = &rx_ring->rx_bi[i];
		skb = bi->skb;

		if (!skb) {
			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
							rx_ring->rx_buf_len);
			if (!skb) {
				rx_ring->rx_stats.alloc_buff_failed++;
				goto no_buffers;
			}
			/* initialize queue mapping */
			skb_record_rx_queue(skb, rx_ring->queue_index);
			bi->skb = skb;
		}

		if (!bi->dma) {
			bi->dma = dma_map_single(rx_ring->dev,
						 skb->data,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);
			if (dma_mapping_error(rx_ring->dev, bi->dma)) {
				rx_ring->rx_stats.alloc_buff_failed++;
				bi->dma = 0;
				goto no_buffers;
			}
		}

		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
		rx_desc->read.hdr_addr = 0;
		i++;
		if (i == rx_ring->count)
			i = 0;
	}

no_buffers:
	if (rx_ring->next_to_use != i)
		i40e_release_rx_desc(rx_ring, i);
}

/**
 * i40e_receive_skb - Send a completed packet up the stack
 * @rx_ring: rx ring in play
 * @skb: packet to send up
 * @vlan_tag: vlan tag for packet
 **/
static void i40e_receive_skb(struct i40e_ring *rx_ring,
			     struct sk_buff *skb, u16 vlan_tag)
{
	struct i40e_q_vector *q_vector = rx_ring->q_vector;

	if (vlan_tag & VLAN_VID_MASK)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);

	napi_gro_receive(&q_vector->napi, skb);
}

/**
 * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
 * @vsi: the VSI we care about
 * @skb: skb currently being received and modified
 * @rx_status: status value of last descriptor in packet
 * @rx_error: error value of last descriptor in packet
 * @rx_ptype: ptype value of last descriptor in packet
 **/
static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
				    struct sk_buff *skb,
				    u32 rx_status,
				    u32 rx_error,
				    u16 rx_ptype)
{
	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype);
	bool ipv4 = false, ipv6 = false;
	bool ipv4_tunnel, ipv6_tunnel;
	__wsum rx_udp_csum;
	struct iphdr *iph;
	__sum16 csum;

	ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
		      (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
	ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
		      (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);

	skb->ip_summed = CHECKSUM_NONE;

	/* Rx csum enabled and ip headers found? */
	if (!(vsi->netdev->features & NETIF_F_RXCSUM))
		return;

	/* did the hardware decode the packet and checksum? */
	if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
		return;

	/* both known and outer_ip must be set for the below code to work */
	if (!(decoded.known && decoded.outer_ip))
		return;

	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
	    decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4)
		ipv4 = true;
	else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
		 decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6)
		ipv6 = true;

	if (ipv4 &&
	    (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
			 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
		goto checksum_fail;

	/* likely incorrect csum if alternate IP extension headers found */
	if (ipv6 &&
	    rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
		/* don't increment checksum err here, non-fatal err */
		return;

	/* there was some L4 error, count error and punt packet to the stack */
	if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
		goto checksum_fail;

	/* handle packets that were not able to be checksummed due
	 * to arrival speed, in this case the stack can compute
	 * the csum.
	 */
	if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
		return;

	/* If VXLAN traffic has an outer UDPv4 checksum we need to check
	 * it in the driver, hardware does not do it for us.
	 * Since L3L4P bit was set we assume a valid IHL value (>=5)
	 * so the total length of IPv4 header is IHL*4 bytes
	 * The UDP_0 bit *may* be set if the *inner* header is UDP
	 */
	if (!(vsi->back->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE) &&
	    ipv4_tunnel) {
		skb->transport_header = skb->mac_header +
					sizeof(struct ethhdr) +
					(ip_hdr(skb)->ihl * 4);

		/* Add 4 bytes for VLAN tagged packets */
		skb->transport_header += (skb->protocol == htons(ETH_P_8021Q) ||
					  skb->protocol == htons(ETH_P_8021AD))
					  ? VLAN_HLEN : 0;

		if ((ip_hdr(skb)->protocol == IPPROTO_UDP) &&
		    (udp_hdr(skb)->check != 0)) {
			rx_udp_csum = udp_csum(skb);
			iph = ip_hdr(skb);
			csum = csum_tcpudp_magic(
					iph->saddr, iph->daddr,
					(skb->len - skb_transport_offset(skb)),
					IPPROTO_UDP, rx_udp_csum);

			if (udp_hdr(skb)->check != csum)
				goto checksum_fail;

		} /* else its GRE and so no outer UDP header */
	}

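	/* all checks passed: mark the packet verified.  For tunnelled
	 * frames csum_level is set to 1 below, telling the stack that the
	 * inner checksum is covered as well as the outer one.
	 */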
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->csum_level = ipv4_tunnel || ipv6_tunnel;

	return;

checksum_fail:
	vsi->back->hw_csum_rx_error++;
}

/**
 * i40e_rx_hash - returns the hash value from the Rx descriptor
 * @ring: descriptor ring
 * @rx_desc: specific descriptor
 **/
static inline u32 i40e_rx_hash(struct i40e_ring *ring,
			       union i40e_rx_desc *rx_desc)
{
	const __le64 rss_mask =
		cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
			    I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);

	if ((ring->netdev->features & NETIF_F_RXHASH) &&
	    (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask)
		return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
	else
		return 0;
}

/**
 * i40e_ptype_to_hash - get a hash type
 * @ptype: the ptype value from the descriptor
 *
 * Returns a hash type to be used by skb_set_hash
 **/
static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
{
	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);

	if (!decoded.known)
		return PKT_HASH_TYPE_NONE;

	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
	    decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
		return PKT_HASH_TYPE_L4;
	else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
		 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
		return PKT_HASH_TYPE_L3;
	else
		return PKT_HASH_TYPE_L2;
}

/**
 * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split
 * @rx_ring: rx ring to clean
 * @budget: how many cleans we're allowed
 *
 * Returns number of packets cleaned
 **/
static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
	const int current_node = numa_mem_id();
	struct i40e_vsi *vsi = rx_ring->vsi;
	u16 i = rx_ring->next_to_clean;
	union i40e_rx_desc *rx_desc;
	u32 rx_error, rx_status;
	u8 rx_ptype;
	u64 qword;

	if (budget <= 0)
		return 0;

	do {
		struct i40e_rx_buffer *rx_bi;
		struct sk_buff *skb;
		u16 vlan_tag;
		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
			i40e_alloc_rx_buffers_ps(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		i = rx_ring->next_to_clean;
		rx_desc = I40E_RX_DESC(rx_ring, i);
		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
		rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
			    I40E_RXD_QW1_STATUS_SHIFT;

		if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * DD bit is set.
		 */
		dma_rmb();
		if (i40e_rx_is_programming_status(qword)) {
			i40e_clean_programming_status(rx_ring, rx_desc);
			I40E_RX_INCREMENT(rx_ring, i);
			continue;
		}
		rx_bi = &rx_ring->rx_bi[i];
		skb = rx_bi->skb;
		if (likely(!skb)) {
			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
							rx_ring->rx_hdr_len);
			if (!skb) {
				rx_ring->rx_stats.alloc_buff_failed++;
				break;
			}

			/* initialize queue mapping */
			skb_record_rx_queue(skb, rx_ring->queue_index);
			/* we are reusing so sync this buffer for CPU use */
			dma_sync_single_range_for_cpu(rx_ring->dev,
						      rx_bi->dma,
						      0,
						      rx_ring->rx_hdr_len,
						      DMA_FROM_DEVICE);
		}
		rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
				I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
		rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >>
				I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
		rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >>
			 I40E_RXD_QW1_LENGTH_SPH_SHIFT;

		rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
			   I40E_RXD_QW1_ERROR_SHIFT;
		rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
		rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);

		rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
			   I40E_RXD_QW1_PTYPE_SHIFT;
		prefetch(rx_bi->page);
		rx_bi->skb = NULL;
		cleaned_count++;
		if (rx_hbo || rx_sph) {
			int len;

			if (rx_hbo)
				len = I40E_RX_HDR_SIZE;
			else
				len = rx_header_len;
			memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len);
		} else if (skb->len == 0) {
			int len;

			len = (rx_packet_len > skb_headlen(skb) ?
			       skb_headlen(skb) : rx_packet_len);
			memcpy(__skb_put(skb, len),
			       rx_bi->page + rx_bi->page_offset,
			       len);
			rx_bi->page_offset += len;
			rx_packet_len -= len;
		}

		/* Get the rest of the data if this was a header split */
		if (rx_packet_len) {
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   rx_bi->page,
					   rx_bi->page_offset,
					   rx_packet_len);

			skb->len += rx_packet_len;
			skb->data_len += rx_packet_len;
			skb->truesize += rx_packet_len;

			if ((page_count(rx_bi->page) == 1) &&
			    (page_to_nid(rx_bi->page) == current_node))
				get_page(rx_bi->page);
			else
				rx_bi->page = NULL;

			dma_unmap_page(rx_ring->dev,
				       rx_bi->page_dma,
				       PAGE_SIZE / 2,
				       DMA_FROM_DEVICE);
			rx_bi->page_dma = 0;
		}
		I40E_RX_INCREMENT(rx_ring, i);

		if (unlikely(
		    !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
			struct i40e_rx_buffer *next_buffer;

			next_buffer = &rx_ring->rx_bi[i];
			next_buffer->skb = skb;
			rx_ring->rx_stats.non_eop_descs++;
			continue;
		}

		/* ERR_MASK will only have valid bits if EOP set */
		if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
			dev_kfree_skb_any(skb);
			continue;
		}

		skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
			     i40e_ptype_to_hash(rx_ptype));
		if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
			i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
					     I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
					     I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
			rx_ring->last_rx_timestamp = jiffies;
		}

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		skb->protocol = eth_type_trans(skb, rx_ring->netdev);

		i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);

		vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
			 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
			 : 0;
#ifdef I40E_FCOE
		if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
			dev_kfree_skb_any(skb);
			continue;
		}
#endif
		i40e_receive_skb(rx_ring, skb, vlan_tag);

		rx_desc->wb.qword1.status_error_len = 0;

	} while (likely(total_rx_packets < budget));

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	rx_ring->q_vector->rx.total_packets += total_rx_packets;
	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;

	return total_rx_packets;
}

/**
 * i40e_clean_rx_irq_1buf - Reclaim resources after receive; single buffer
 * @rx_ring: rx ring to clean
 * @budget: how many cleans we're allowed
 *
 * Returns number of packets cleaned
 **/
static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
	struct i40e_vsi *vsi = rx_ring->vsi;
	union i40e_rx_desc *rx_desc;
	u32 rx_error, rx_status;
	u16 rx_packet_len;
	u8 rx_ptype;
	u64 qword;
	u16 i;

	do {
		struct i40e_rx_buffer *rx_bi;
		struct sk_buff *skb;
		u16 vlan_tag;
		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
			i40e_alloc_rx_buffers_1buf(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		i = rx_ring->next_to_clean;
		rx_desc = I40E_RX_DESC(rx_ring, i);
		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
		rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
			    I40E_RXD_QW1_STATUS_SHIFT;

		if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * DD bit is set.
		 */
		dma_rmb();

		if (i40e_rx_is_programming_status(qword)) {
			i40e_clean_programming_status(rx_ring, rx_desc);
			I40E_RX_INCREMENT(rx_ring, i);
			continue;
		}
		rx_bi = &rx_ring->rx_bi[i];
		skb = rx_bi->skb;
		prefetch(skb->data);

		rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
				I40E_RXD_QW1_LENGTH_PBUF_SHIFT;

		rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
			   I40E_RXD_QW1_ERROR_SHIFT;
		rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);

		rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
			   I40E_RXD_QW1_PTYPE_SHIFT;
		rx_bi->skb = NULL;
		cleaned_count++;

		/* Get the header and possibly the whole packet
		 * If this is an skb from previous receive dma will be 0
		 */
		skb_put(skb, rx_packet_len);
		dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len,
				 DMA_FROM_DEVICE);
		rx_bi->dma = 0;

		I40E_RX_INCREMENT(rx_ring, i);

		if (unlikely(
		    !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
			rx_ring->rx_stats.non_eop_descs++;
			continue;
		}

		/* ERR_MASK will only have valid bits if EOP set */
		if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
			dev_kfree_skb_any(skb);
			continue;
		}

		skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
			     i40e_ptype_to_hash(rx_ptype));
		if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
			i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
					     I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
					     I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
			rx_ring->last_rx_timestamp = jiffies;
		}

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		skb->protocol = eth_type_trans(skb, rx_ring->netdev);

		i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);

		vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
			 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
			 : 0;
#ifdef I40E_FCOE
		if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
			dev_kfree_skb_any(skb);
			continue;
		}
#endif
		i40e_receive_skb(rx_ring, skb, vlan_tag);

		rx_desc->wb.qword1.status_error_len = 0;
	} while (likely(total_rx_packets < budget));

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	rx_ring->q_vector->rx.total_packets += total_rx_packets;
	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;

	return total_rx_packets;
}

static u32 i40e_buildreg_itr(const int type, const u16 itr)
{
	u32 val;

	val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
	      I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
	      (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
	      (itr << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);

	return val;
}

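/* The value built above packs the interrupt-enable bit, the ITR index
 * selecting which throttle timer to update, and the new interval (in
 * 2 usec units) into a single PFINT_DYN_CTLN register write.
 */
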
/* a small macro to shorten up some long lines */
#define INTREG I40E_PFINT_DYN_CTLN

/**
 * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
 * @vsi: the VSI we care about
 * @q_vector: q_vector for which itr is being updated and interrupt enabled
 **/
static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
					  struct i40e_q_vector *q_vector)
{
	struct i40e_hw *hw = &vsi->back->hw;
	bool rx = false, tx = false;
	u32 rxval, txval;
	int vector;

	vector = (q_vector->v_idx + vsi->base_vector);

	/* avoid dynamic calculation if in countdown mode OR if
	 * all dynamic is disabled
	 */
	rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);

	if (q_vector->itr_countdown > 0 ||
	    (!ITR_IS_DYNAMIC(vsi->rx_itr_setting) &&
	     !ITR_IS_DYNAMIC(vsi->tx_itr_setting))) {
		goto enable_int;
	}

	if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) {
		rx = i40e_set_new_dynamic_itr(&q_vector->rx);
		rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
	}

	if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) {
		tx = i40e_set_new_dynamic_itr(&q_vector->tx);
		txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr);
	}

	if (rx || tx) {
		/* get the higher of the two ITR adjustments and
		 * use the same value for both ITR registers
		 * when in adaptive mode (Rx and/or Tx)
		 */
		u16 itr = max(q_vector->tx.itr, q_vector->rx.itr);

		q_vector->tx.itr = q_vector->rx.itr = itr;
		txval = i40e_buildreg_itr(I40E_TX_ITR, itr);
		tx = true;
		rxval = i40e_buildreg_itr(I40E_RX_ITR, itr);
		rx = true;
	}

	/* only need to enable the interrupt once, but need
	 * to possibly update both ITR values
	 */
	if (rx) {
		/* set the INTENA_MSK_MASK so that this first write
		 * won't actually enable the interrupt, instead just
		 * updating the ITR (it's bit 31 PF and VF)
		 */
		rxval |= BIT(31);
		/* don't check _DOWN because interrupt isn't being enabled */
		wr32(hw, INTREG(vector - 1), rxval);
	}

enable_int:
	if (!test_bit(__I40E_DOWN, &vsi->state))
		wr32(hw, INTREG(vector - 1), txval);

	if (q_vector->itr_countdown)
		q_vector->itr_countdown--;
	else
		q_vector->itr_countdown = ITR_COUNTDOWN_START;
}

/**
 * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean all queues associated with a q_vector.
 *
 * Returns the amount of work done
 **/
int i40e_napi_poll(struct napi_struct *napi, int budget)
{
	struct i40e_q_vector *q_vector =
			       container_of(napi, struct i40e_q_vector, napi);
	struct i40e_vsi *vsi = q_vector->vsi;
	struct i40e_ring *ring;
	bool clean_complete = true;
	bool arm_wb = false;
	int budget_per_ring;
	int work_done = 0;

	if (test_bit(__I40E_DOWN, &vsi->state)) {
		napi_complete(napi);
		return 0;
	}

	/* Clear hung_detected bit */
	clear_bit(I40E_Q_VECTOR_HUNG_DETECT, &q_vector->hung_detected);
	/* Since the actual Tx work is minimal, we can give the Tx a larger
	 * budget and be more aggressive about cleaning up the Tx descriptors.
	 */
	i40e_for_each_ring(ring, q_vector->tx) {
		clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
		arm_wb = arm_wb || ring->arm_wb;
		ring->arm_wb = false;
	}

	/* Handle case where we are called by netpoll with a budget of 0 */
	if (budget <= 0)
		goto tx_only;

	/* We attempt to distribute budget to each Rx queue fairly, but don't
	 * allow the budget to go below 1 because that would exit polling early.
	 */
	budget_per_ring = max(budget/q_vector->num_ringpairs, 1);

	i40e_for_each_ring(ring, q_vector->rx) {
		int cleaned;

		if (ring_is_ps_enabled(ring))
			cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring);
		else
			cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring);

		work_done += cleaned;
		/* if we didn't clean as many as budgeted, we must be done */
		clean_complete &= (budget_per_ring != cleaned);
	}

	/* If work not completed, return budget and polling will return */
	if (!clean_complete) {
tx_only:
		if (arm_wb) {
			q_vector->tx.ring[0].tx_stats.tx_force_wb++;
			i40e_force_wb(vsi, q_vector);
		}
		return budget;
	}

	if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
		q_vector->arm_wb_state = false;

	/* Work is done so exit the polling mode and re-enable the interrupt */
	napi_complete_done(napi, work_done);
	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
		i40e_update_enable_itr(vsi, q_vector);
	} else { /* Legacy mode */
		struct i40e_hw *hw = &vsi->back->hw;
		/* We re-enable the queue 0 cause, but
		 * don't worry about dynamic_enable
		 * because we left it on for the other
		 * possible interrupts during napi
		 */
		u32 qval = rd32(hw, I40E_QINT_RQCTL(0)) |
			   I40E_QINT_RQCTL_CAUSE_ENA_MASK;

		wr32(hw, I40E_QINT_RQCTL(0), qval);
		qval = rd32(hw, I40E_QINT_TQCTL(0)) |
		       I40E_QINT_TQCTL_CAUSE_ENA_MASK;
		wr32(hw, I40E_QINT_TQCTL(0), qval);
		i40e_irq_dynamic_enable_icr0(vsi->back);
	}
	return 0;
}

/**
 * i40e_atr - Add a Flow Director ATR filter
 * @tx_ring: ring to add programming descriptor to
 * @skb: send buffer
 * @tx_flags: send tx flags
 * @protocol: wire protocol
 **/
static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
		     u32 tx_flags, __be16 protocol)
{
	struct i40e_filter_program_desc *fdir_desc;
	struct i40e_pf *pf = tx_ring->vsi->back;
	union {
		unsigned char *network;
		struct iphdr *ipv4;
		struct ipv6hdr *ipv6;
	} hdr;
	struct tcphdr *th;
	unsigned int hlen;
	u32 flex_ptype, dtype_cmd;
	u16 i;

	/* make sure ATR is enabled */
	if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
		return;

	if ((pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
		return;

	/* if sampling is disabled do nothing */
	if (!tx_ring->atr_sample_rate)
		return;

	if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
		return;

	if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL)) {
		/* snag network header to get L4 type and address */
		hdr.network = skb_network_header(skb);

		/* Currently only IPv4/IPv6 with TCP is supported
		 * access ihl as u8 to avoid unaligned access on ia64
		 */
		if (tx_flags & I40E_TX_FLAGS_IPV4)
			hlen = (hdr.network[0] & 0x0F) << 2;
		else if (protocol == htons(ETH_P_IPV6))
			hlen = sizeof(struct ipv6hdr);
		else
			return;
	} else {
		hdr.network = skb_inner_network_header(skb);
		hlen = skb_inner_network_header_len(skb);
	}

	/* Currently only IPv4/IPv6 with TCP is supported
	 * Note: tx_flags gets modified to reflect inner protocols in
	 * tx_enable_csum function if encap is enabled.
	 */
	if ((tx_flags & I40E_TX_FLAGS_IPV4) &&
	    (hdr.ipv4->protocol != IPPROTO_TCP))
		return;
	else if ((tx_flags & I40E_TX_FLAGS_IPV6) &&
		 (hdr.ipv6->nexthdr != IPPROTO_TCP))
		return;

	th = (struct tcphdr *)(hdr.network + hlen);

	/* Due to lack of space, no more new filters can be programmed */
	if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
		return;
	if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) {
		/* HW ATR eviction will take care of removing filters on FIN
		 * and RST packets.
		 */
		if (th->fin || th->rst)
			return;
	}

	tx_ring->atr_count++;

	/* sample on all syn/fin/rst packets or once every atr sample rate */
	if (!th->fin &&
	    !th->syn &&
	    !th->rst &&
	    (tx_ring->atr_count < tx_ring->atr_sample_rate))
		return;

	tx_ring->atr_count = 0;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
		     I40E_TXD_FLTR_QW0_QINDEX_MASK;
	flex_ptype |= (protocol == htons(ETH_P_IP)) ?
		      (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
		       I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
		      (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
		       I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);

	flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;

	dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;

	dtype_cmd |= (th->fin || th->rst) ?
		     (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
		      I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
		     (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
		      I40E_TXD_FLTR_QW1_PCMD_SHIFT);

	dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
		     I40E_TXD_FLTR_QW1_DEST_SHIFT;

	dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
		     I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;

	dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
	if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL))
		dtype_cmd |=
			((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
			 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
			I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
	else
		dtype_cmd |=
			((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
			 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
			I40E_TXD_FLTR_QW1_CNTINDEX_MASK;

	if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)
		dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;

	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
	fdir_desc->rsvd = cpu_to_le32(0);
	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
	fdir_desc->fd_id = cpu_to_le32(0);
}

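/* Note: ATR reuses the same filter-programming descriptor format as the
 * sideband path above, but rules are added and removed automatically
 * from the hot Tx path based on observed TCP SYN/FIN/RST flags.
 */
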
/**
 * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
 * @skb: send buffer
 * @tx_ring: ring to send buffer on
 * @flags: the tx flags to be set
 *
 * Checks the skb and set up correspondingly several generic transmit flags
 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
 *
 * Returns an error code indicating the frame should be dropped upon error,
 * otherwise returns 0 to indicate the flags have been set properly.
 **/
2122 inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
2123 struct i40e_ring *tx_ring,
2126 static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
2127 struct i40e_ring *tx_ring,
2131 __be16 protocol = skb->protocol;

	if (protocol == htons(ETH_P_8021Q) &&
	    !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
		/* When HW VLAN acceleration is turned off by the user the
		 * stack sets the protocol to 8021q so that the driver
		 * can take any steps required to support the SW only
		 * VLAN handling. In our case the driver doesn't need
		 * to take any further steps so just set the protocol
		 * to the encapsulated ethertype.
		 */
		skb->protocol = vlan_get_protocol(skb);
		goto out;
	}

	/* if we have a HW VLAN tag being added, default to the HW one */
	if (skb_vlan_tag_present(skb)) {
		tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= I40E_TX_FLAGS_HW_VLAN;
	/* else if it is a SW VLAN, check the next protocol and store the tag */
	} else if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_hdr *vhdr, _vhdr;

		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
		if (!vhdr)
			return -EINVAL;

		protocol = vhdr->h_vlan_encapsulated_proto;
		tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= I40E_TX_FLAGS_SW_VLAN;
	}

	if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED))
		goto out;

	/* Insert 802.1p priority into VLAN header */
	if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
	    (skb->priority != TC_PRIO_CONTROL)) {
		tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
		tx_flags |= (skb->priority & 0x7) <<
			    I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
		if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
			struct vlan_ethhdr *vhdr;
			int rc;

			rc = skb_cow_head(skb, 0);
			if (rc < 0)
				return rc;
			vhdr = (struct vlan_ethhdr *)skb->data;
			vhdr->h_vlan_TCI = htons(tx_flags >>
						 I40E_TX_FLAGS_VLAN_SHIFT);
		} else {
			tx_flags |= I40E_TX_FLAGS_HW_VLAN;
		}
	}

out:
	*flags = tx_flags;
	return 0;
}
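
/* Sketch of how the VLAN bits end up packed in tx_flags, assuming the
 * I40E_TX_FLAGS_* definitions from i40e.h (VLAN TCI stored in the upper
 * 16 bits, with the 802.1p priority in the top three of those):
 *
 *   VLAN ID 100, priority 5  ->  TCI = (5 << 13) | 100 = 0xA064
 *   tx_flags = (0xA064 << I40E_TX_FLAGS_VLAN_SHIFT) | I40E_TX_FLAGS_HW_VLAN
 *
 * i40e_tx_map() later recovers the TCI with I40E_TX_FLAGS_VLAN_MASK and
 * I40E_TX_FLAGS_VLAN_SHIFT to build the descriptor's L2TAG1 field.
 */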

/**
 * i40e_tso - set up the tso context descriptor
 * @tx_ring:  ptr to the ring to send
 * @skb:      ptr to the skb we're sending
 * @hdr_len:  ptr to the size of the packet header
 * @cd_type_cmd_tso_mss: Quad Word 1
 *
 * Returns 0 if no TSO can happen, 1 if tso is going, or error
 **/
static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
		    u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
{
	u32 cd_cmd, cd_tso_len, cd_mss;
	struct ipv6hdr *ipv6h;
	struct tcphdr *tcph;
	struct iphdr *iph;
	u32 l4len;
	int err;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
	ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);

	if (iph->version == 4) {
		tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
						 0, IPPROTO_TCP, 0);
	} else if (ipv6h->version == 6) {
		tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
		ipv6h->payload_len = 0;
		tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
					       0, IPPROTO_TCP, 0);
	}

	l4len = skb->encapsulation ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb);
	*hdr_len = (skb->encapsulation
		    ? (skb_inner_transport_header(skb) - skb->data)
		    : skb_transport_offset(skb)) + l4len;

	/* find the field values */
	cd_cmd = I40E_TX_CTX_DESC_TSO;
	cd_tso_len = skb->len - *hdr_len;
	cd_mss = skb_shinfo(skb)->gso_size;
	*cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
				((u64)cd_tso_len <<
				 I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
				((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
	return 1;
}
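
/* Worked example (values illustrative, not from the datasheet): for a
 * non-encapsulated TCP skb with a 14-byte Ethernet header, 20-byte IPv4
 * header and 20-byte TCP header, *hdr_len is 54. With skb->len = 65214
 * and gso_size = 1448, the context descriptor carries cd_tso_len = 65160
 * and cd_mss = 1448, and the hardware cuts the payload into 45 segments,
 * each prefixed with a copy of the 54-byte header.
 */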

/**
 * i40e_tsyn - set up the tsyn context descriptor
 * @tx_ring:  ptr to the ring to send
 * @skb:      ptr to the skb we're sending
 * @tx_flags: the collected send information
 * @cd_type_cmd_tso_mss: Quad Word 1
 *
 * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
 **/
static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
		     u32 tx_flags, u64 *cd_type_cmd_tso_mss)
{
	struct i40e_pf *pf;

	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
		return 0;

	/* Tx timestamps cannot be sampled when doing TSO */
	if (tx_flags & I40E_TX_FLAGS_TSO)
		return 0;

	/* only timestamp the outbound packet if the user has requested it and
	 * we are not already transmitting a packet to be timestamped
	 */
	pf = i40e_netdev_to_pf(tx_ring->netdev);
	if (!(pf->flags & I40E_FLAG_PTP))
		return 0;

	if (pf->ptp_tx &&
	    !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, &pf->state)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		pf->ptp_tx_skb = skb_get(skb);
	} else {
		return 0;
	}

	*cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
				I40E_TXD_CTX_QW1_CMD_SHIFT;

	return 1;
}
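
/* Only one outbound packet can wait on a hardware timestamp at a time:
 * the __I40E_PTP_TX_IN_PROGRESS bit acts as a lock around pf->ptp_tx_skb.
 * If a second PTP packet is queued before the first timestamp has been
 * retrieved, the test_and_set_bit_lock() above fails and that packet is
 * simply sent without a hardware timestamp.
 */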

/**
 * i40e_tx_enable_csum - Enable Tx checksum offloads
 * @skb: send buffer
 * @tx_flags: pointer to Tx flags currently set
 * @td_cmd: Tx descriptor command bits to set
 * @td_offset: Tx descriptor header offsets to set
 * @tx_ring: Tx descriptor ring
 * @cd_tunneling: ptr to context desc bits
 **/
static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
				u32 *td_cmd, u32 *td_offset,
				struct i40e_ring *tx_ring,
				u32 *cd_tunneling)
{
	struct ipv6hdr *this_ipv6_hdr;
	unsigned int this_tcp_hdrlen;
	struct iphdr *this_ip_hdr;
	u32 network_hdr_len;
	u8 l4_hdr = 0;
	struct udphdr *oudph;
	struct iphdr *oiph;
	u32 l4_tunnel = 0;

	if (skb->encapsulation) {
		switch (ip_hdr(skb)->protocol) {
		case IPPROTO_UDP:
			oudph = udp_hdr(skb);
			oiph = ip_hdr(skb);
			l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
			*tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
			break;
		case IPPROTO_GRE:
			l4_tunnel = I40E_TXD_CTX_GRE_TUNNELING;
			break;
		default:
			return;
		}
		network_hdr_len = skb_inner_network_header_len(skb);
		this_ip_hdr = inner_ip_hdr(skb);
		this_ipv6_hdr = inner_ipv6_hdr(skb);
		this_tcp_hdrlen = inner_tcp_hdrlen(skb);

		if (*tx_flags & I40E_TX_FLAGS_IPV4) {
			if (*tx_flags & I40E_TX_FLAGS_TSO) {
				*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
				ip_hdr(skb)->check = 0;
			} else {
				*cd_tunneling |=
					I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
			}
		} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
			*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
			if (*tx_flags & I40E_TX_FLAGS_TSO)
				ip_hdr(skb)->check = 0;
		}

		/* Now set the ctx descriptor fields */
		*cd_tunneling |= (skb_network_header_len(skb) >> 2) <<
				 I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT |
				 l4_tunnel |
				 ((skb_inner_network_offset(skb) -
				   skb_transport_offset(skb)) >> 1) <<
				 I40E_TXD_CTX_QW0_NATLEN_SHIFT;
		if (this_ip_hdr->version == 6) {
			*tx_flags &= ~I40E_TX_FLAGS_IPV4;
			*tx_flags |= I40E_TX_FLAGS_IPV6;
		}
		if ((tx_ring->flags & I40E_TXR_FLAGS_OUTER_UDP_CSUM) &&
		    (l4_tunnel == I40E_TXD_CTX_UDP_TUNNELING) &&
		    (*cd_tunneling & I40E_TXD_CTX_QW0_EXT_IP_MASK)) {
			oudph->check = ~csum_tcpudp_magic(oiph->saddr,
					oiph->daddr,
					(skb->len - skb_transport_offset(skb)),
					IPPROTO_UDP, 0);
			*cd_tunneling |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
		}
	} else {
		network_hdr_len = skb_network_header_len(skb);
		this_ip_hdr = ip_hdr(skb);
		this_ipv6_hdr = ipv6_hdr(skb);
		this_tcp_hdrlen = tcp_hdrlen(skb);
	}

	/* Enable IP checksum offloads */
	if (*tx_flags & I40E_TX_FLAGS_IPV4) {
		l4_hdr = this_ip_hdr->protocol;
		/* the stack computes the IP header already, the only time we
		 * need the hardware to recompute it is in the case of TSO.
		 */
		if (*tx_flags & I40E_TX_FLAGS_TSO) {
			*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
			this_ip_hdr->check = 0;
		} else {
			*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
		}
		/* Now set the td_offset for IP header length */
		*td_offset = (network_hdr_len >> 2) <<
			     I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
	} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
		l4_hdr = this_ipv6_hdr->nexthdr;
		*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
		/* Now set the td_offset for IP header length */
		*td_offset = (network_hdr_len >> 2) <<
			     I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
	}
	/* words in MACLEN + dwords in IPLEN + dwords in L4Len */
	*td_offset |= (skb_network_offset(skb) >> 1) <<
		      I40E_TX_DESC_LENGTH_MACLEN_SHIFT;

	/* Enable L4 checksum offloads */
	switch (l4_hdr) {
	case IPPROTO_TCP:
		/* enable checksum offloads */
		*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
		*td_offset |= (this_tcp_hdrlen >> 2) <<
			      I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	case IPPROTO_SCTP:
		/* enable SCTP checksum offload */
		*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
		*td_offset |= (sizeof(struct sctphdr) >> 2) <<
			      I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	case IPPROTO_UDP:
		/* enable UDP checksum offload */
		*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
		*td_offset |= (sizeof(struct udphdr) >> 2) <<
			      I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	default:
		break;
	}
}
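
/* Worked example of the td_offset packing above, assuming an untagged
 * IPv4 TCP frame with no IP or TCP options: MACLEN = 14 bytes = 7 words,
 * IPLEN = 20 bytes = 5 dwords, L4LEN = 20 bytes = 5 dwords, so td_offset
 * encodes 7, 5 and 5 in the MACLEN, IPLEN and L4_FC_LEN fields. The
 * "words in MACLEN + dwords in IPLEN" comment reflects the different
 * units: the MAC length is shifted right by 1 (2-byte words), the IP and
 * L4 lengths by 2 (4-byte dwords).
 */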

/**
 * i40e_create_tx_ctx - Build the Tx context descriptor
 * @tx_ring:  ring to create the descriptor on
 * @cd_type_cmd_tso_mss: Quad Word 1
 * @cd_tunneling: Quad Word 0 - bits 0-31
 * @cd_l2tag2: Quad Word 0 - bits 32-63
 **/
static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
			       const u64 cd_type_cmd_tso_mss,
			       const u32 cd_tunneling, const u32 cd_l2tag2)
{
	struct i40e_tx_context_desc *context_desc;
	int i = tx_ring->next_to_use;

	if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
	    !cd_tunneling && !cd_l2tag2)
		return;

	/* grab the next descriptor */
	context_desc = I40E_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* cpu_to_le32 and assign to struct fields */
	context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
	context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
	context_desc->rsvd = cpu_to_le16(0);
	context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
}
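
/* The early return above is what keeps simple frames cheap: the caller
 * initializes cd_type_cmd_tso_mss to I40E_TX_DESC_DTYPE_CONTEXT, so for a
 * plain frame that accumulated no TSO, timestamp or tunneling bits, no
 * context descriptor is written and no ring slot is consumed.
 */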

/**
 * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
 * @tx_ring: the ring to be checked
 * @size:    the size buffer we want to assure is available
 *
 * Returns -EBUSY if a stop is needed, else 0
 **/
static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
	/* Memory barrier before checking head and tail */
	smp_mb();

	/* Check again in a case another CPU has just made room available. */
	if (likely(I40E_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;
	return 0;
}
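
/* The stop-then-recheck pattern closes a race with the Tx cleanup path:
 * if the free-space check ran first and the stop second, a completion
 * could free descriptors in between, see a still-running queue, and never
 * wake it. Stopping first and re-testing after smp_mb() guarantees that
 * either this function sees the freed space, or the cleanup path sees the
 * stopped queue and restarts it.
 */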

/**
 * i40e_maybe_stop_tx - 1st level check for tx stop conditions
 * @tx_ring: the ring to be checked
 * @size:    the size buffer we want to assure is available
 *
 * Returns 0 if stop is not needed
 **/
#ifdef I40E_FCOE
inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
#else
static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
#endif
{
	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __i40e_maybe_stop_tx(tx_ring, size);
}

/**
 * i40e_chk_linearize - Check if there are more than 8 fragments per packet
 * @skb:      send buffer
 * @tx_flags: collected send information
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire and so we need to figure out the cases where we
 * need to linearize the skb.
 **/
static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags)
{
	struct skb_frag_struct *frag;
	bool linearize = false;
	unsigned int size = 0;
	u16 num_frags;
	u16 gso_segs;

	num_frags = skb_shinfo(skb)->nr_frags;
	gso_segs = skb_shinfo(skb)->gso_segs;

	if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
		u16 j = 0;

		if (num_frags < (I40E_MAX_BUFFER_TXD))
			goto linearize_chk_done;
		/* try the simple math, if we have too many frags per segment */
		if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) >
		    I40E_MAX_BUFFER_TXD) {
			linearize = true;
			goto linearize_chk_done;
		}
		frag = &skb_shinfo(skb)->frags[0];
		/* we might still have more fragments per segment */
		do {
			size += skb_frag_size(frag);
			frag++;
			j++;
			if ((size >= skb_shinfo(skb)->gso_size) &&
			    (j < I40E_MAX_BUFFER_TXD)) {
				size = (size % skb_shinfo(skb)->gso_size);
				j = (size) ? 1 : 0;
			}
			if (j == I40E_MAX_BUFFER_TXD) {
				linearize = true;
				break;
			}
			num_frags--;
		} while (num_frags);
	} else {
		if (num_frags >= I40E_MAX_BUFFER_TXD)
			linearize = true;
	}

linearize_chk_done:
	return linearize;
}
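
/* Worked example of the "simple math" shortcut, assuming
 * I40E_MAX_BUFFER_TXD is 8 (the per-packet descriptor chain limit the
 * kernel-doc above refers to): a TSO skb with num_frags = 17 and
 * gso_segs = 2 gives DIV_ROUND_UP(17 + 2, 2) = 10 > 8, so the skb is
 * linearized without walking the fragment list. With num_frags = 9 and
 * gso_segs = 4 the quick check passes (DIV_ROUND_UP(13, 4) = 4) and the
 * do/while loop decides based on how the fragments actually line up
 * against gso_size.
 */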

/**
 * i40e_tx_map - Build the Tx descriptor
 * @tx_ring:  ring to send buffer on
 * @skb:      send buffer
 * @first:    first buffer info buffer to use
 * @tx_flags: collected send information
 * @hdr_len:  size of the packet header
 * @td_cmd:   the command field in the descriptor
 * @td_offset: offset for checksum or crc
 **/
#ifdef I40E_FCOE
inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
			struct i40e_tx_buffer *first, u32 tx_flags,
			const u8 hdr_len, u32 td_cmd, u32 td_offset)
#else
static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
			       struct i40e_tx_buffer *first, u32 tx_flags,
			       const u8 hdr_len, u32 td_cmd, u32 td_offset)
#endif
{
	unsigned int data_len = skb->data_len;
	unsigned int size = skb_headlen(skb);
	struct skb_frag_struct *frag;
	struct i40e_tx_buffer *tx_bi;
	struct i40e_tx_desc *tx_desc;
	u16 i = tx_ring->next_to_use;
	u32 td_tag = 0;
	dma_addr_t dma;
	u16 gso_segs;
	u16 desc_count = 0;
	bool tail_bump = true;
	bool do_rs = false;

	if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
		td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
		td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
			 I40E_TX_FLAGS_VLAN_SHIFT;
	}

	if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
		gso_segs = skb_shinfo(skb)->gso_segs;
	else
		gso_segs = 1;

	/* multiply data chunks by size of headers */
	first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len);
	first->gso_segs = gso_segs;

	first->tx_flags = tx_flags;

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_desc = I40E_TX_DESC(tx_ring, i);
	tx_bi = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_bi, len, size);
		dma_unmap_addr_set(tx_bi, dma, dma);

		tx_desc->buffer_addr = cpu_to_le64(dma);

		while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
			tx_desc->cmd_type_offset_bsz =
				build_ctob(td_cmd, td_offset,
					   I40E_MAX_DATA_PER_TXD, td_tag);

			tx_desc++;
			i++;
			desc_count++;

			if (i == tx_ring->count) {
				tx_desc = I40E_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += I40E_MAX_DATA_PER_TXD;
			size -= I40E_MAX_DATA_PER_TXD;

			tx_desc->buffer_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
							  size, td_tag);

		tx_desc++;
		i++;
		desc_count++;

		if (i == tx_ring->count) {
			tx_desc = I40E_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);

		tx_bi = &tx_ring->tx_bi[i];
	}

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
						 tx_ring->queue_index),
			     first->bytecount);
	i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);

	/* Algorithm to optimize tail and RS bit setting:
	 * if xmit_more is supported
	 *	if xmit_more is true
	 *		do not update tail and do not mark RS bit.
	 *	if xmit_more is false and last xmit_more was false
	 *		if every packet spanned less than 4 desc
	 *			then set RS bit on 4th packet and update tail
	 *			on every packet
	 *		else
	 *			update tail and set RS bit on every packet.
	 *	if xmit_more is false and last_xmit_more was true
	 *		update tail and set RS bit.
	 *
	 * Optimization: wmb to be issued only in case of tail update.
	 * Also optimize the Descriptor WB path for RS bit with the same
	 * algorithm.
	 *
	 * Note: If there are less than 4 packets
	 * pending and interrupts were disabled the service task will
	 * trigger a force WB.
	 */
	if (skb->xmit_more &&
	    !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
						    tx_ring->queue_index))) {
		tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
		tail_bump = false;
	} else if (!skb->xmit_more &&
		   !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
						       tx_ring->queue_index)) &&
		   (!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) &&
		   (tx_ring->packet_stride < WB_STRIDE) &&
		   (desc_count < WB_STRIDE)) {
		tx_ring->packet_stride++;
	} else {
		tx_ring->packet_stride = 0;
		tx_ring->flags &= ~I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
		do_rs = true;
	}
	if (do_rs)
		tx_ring->packet_stride = 0;

	tx_desc->cmd_type_offset_bsz =
			build_ctob(td_cmd, td_offset, size, td_tag) |
			cpu_to_le64((u64)(do_rs ? I40E_TXD_CMD :
						  I40E_TX_DESC_CMD_EOP) <<
						  I40E_TXD_QW1_CMD_SHIFT);

	/* notify HW of packet */
	if (!tail_bump)
		prefetchw(tx_desc + 1);

	if (tail_bump) {
		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, tx_ring->tail);
	}

	return;

dma_error:
	dev_info(tx_ring->dev, "TX DMA map failed\n");

	/* clear dma mappings for failed tx_bi map */
	for (;;) {
		tx_bi = &tx_ring->tx_bi[i];
		i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
		if (tx_bi == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}
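
/* Sketch of the batching effect above: with skb->xmit_more set on, say,
 * the first three packets of a four-packet burst, tail_bump stays false,
 * so the wmb()/writel() pair runs once for the burst instead of once per
 * packet. Independently, the RS (report status) bit is only requested
 * about every WB_STRIDE packets, so the hardware writes back completion
 * status for a batch of descriptors rather than for each one.
 */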

/**
 * i40e_xmit_descriptor_count - calculate number of tx descriptors needed
 * @skb:     send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns number of data descriptors needed for this skb. Returns 0 to
 * indicate there are not enough descriptors available in this ring since
 * we need at least one descriptor.
 **/
#ifdef I40E_FCOE
inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
				      struct i40e_ring *tx_ring)
#else
static inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
					     struct i40e_ring *tx_ring)
#endif
{
	unsigned int f;
	int count = 0;

	/* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
	 *       + 4 desc gap to avoid the cache line where head is,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);

	count += TXD_USE_COUNT(skb_headlen(skb));
	if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
		tx_ring->tx_stats.tx_busy++;
		return 0;
	}
	return count;
}
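
/* Worked example, assuming I40E_MAX_DATA_PER_TXD is 8192 (it is derived
 * from I40E_MAX_TXD_PWR in i40e_txrx.h, so check that header before
 * relying on the number): an skb with a 256-byte linear area and two
 * 32KB fragments needs TXD_USE_COUNT(256) = 1 plus
 * 2 * TXD_USE_COUNT(32768) = 8 data descriptors, so the ring must have
 * 9 + 4 + 1 = 14 free slots before the transmit is allowed to proceed.
 */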

/**
 * i40e_xmit_frame_ring - Sends buffer on Tx ring
 * @skb:     send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/
static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
					struct i40e_ring *tx_ring)
{
	u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
	u32 cd_tunneling = 0, cd_l2tag2 = 0;
	struct i40e_tx_buffer *first;
	u32 td_offset = 0;
	u32 tx_flags = 0;
	__be16 protocol;
	u32 td_cmd = 0;
	u8 hdr_len = 0;
	int tsyn;
	int tso;

	/* prefetch the data, we'll need it later */
	prefetch(skb->data);

	if (0 == i40e_xmit_descriptor_count(skb, tx_ring))
		return NETDEV_TX_BUSY;

	/* prepare the xmit flags */
	if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
		goto out_drop;

	/* obtain protocol of skb */
	protocol = vlan_get_protocol(skb);

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_bi[tx_ring->next_to_use];

	/* setup IPv4/IPv6 offloads */
	if (protocol == htons(ETH_P_IP))
		tx_flags |= I40E_TX_FLAGS_IPV4;
	else if (protocol == htons(ETH_P_IPV6))
		tx_flags |= I40E_TX_FLAGS_IPV6;

	tso = i40e_tso(tx_ring, skb, &hdr_len, &cd_type_cmd_tso_mss);

	if (tso < 0)
		goto out_drop;
	else if (tso)
		tx_flags |= I40E_TX_FLAGS_TSO;

	tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);

	if (tsyn)
		tx_flags |= I40E_TX_FLAGS_TSYN;

	if (i40e_chk_linearize(skb, tx_flags)) {
		if (skb_linearize(skb))
			goto out_drop;
		tx_ring->tx_stats.tx_linearize++;
	}
	skb_tx_timestamp(skb);

	/* always enable CRC insertion offload */
	td_cmd |= I40E_TX_DESC_CMD_ICRC;

	/* Always offload the checksum, since it's in the data descriptor */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		tx_flags |= I40E_TX_FLAGS_CSUM;

		i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
				    tx_ring, &cd_tunneling);
	}

	i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
			   cd_tunneling, cd_l2tag2);

	/* Add Flow Director ATR if it's enabled.
	 *
	 * NOTE: this must always be directly before the data descriptor.
	 */
	i40e_atr(tx_ring, skb, tx_flags, protocol);

	i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
		    td_cmd, td_offset);

	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

/**
 * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
 * @skb:    send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];

	/* hardware can't handle really short frames, hardware padding works
	 * beyond this point
	 */
	if (skb_put_padto(skb, I40E_MIN_TX_LEN))
		return NETDEV_TX_OK;

	return i40e_xmit_frame_ring(skb, tx_ring);
}