/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/
#include <linux/prefetch.h>
#include <net/busy_poll.h>
#include "i40e.h"
#include "i40e_prototype.h"

static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
				u32 td_tag)
{
	return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
			   ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
			   ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
			   ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
			   ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
}

#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
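
/* For illustration (an editor's sketch, not driver-defined behavior):
 * build_ctob(I40E_TXD_CMD, 0, 256, 0) returns the DATA descriptor type
 * ORed with EOP|RS in the command field and 256 in the buffer-size field,
 * all packed into the single little-endian quad word the hardware reads
 * as cmd_type_offset_bsz.
 */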
/**
 * i40e_fdir - Generate a Flow Director descriptor based on fdata
 * @tx_ring: Tx ring to send buffer on
 * @fdata: Flow director filter data
 * @add: Indicate if we are adding a rule or deleting one
 **/
static void i40e_fdir(struct i40e_ring *tx_ring,
		      struct i40e_fdir_filter *fdata, bool add)
{
	struct i40e_filter_program_desc *fdir_desc;
	struct i40e_pf *pf = tx_ring->vsi->back;
	u32 flex_ptype, dtype_cmd;
	u16 i;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	flex_ptype = I40E_TXD_FLTR_QW0_QINDEX_MASK &
		     (fdata->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT);

	flex_ptype |= I40E_TXD_FLTR_QW0_FLEXOFF_MASK &
		      (fdata->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT);

	flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK &
		      (fdata->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
	flex_ptype |= I40E_TXD_FLTR_QW0_FLEXOFF_MASK &
		      (fdata->flex_offset << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT);

	/* Use LAN VSI Id if not programmed by user */
	flex_ptype |= I40E_TXD_FLTR_QW0_DEST_VSI_MASK &
		      ((u32)(fdata->dest_vsi ? : pf->vsi[pf->lan_vsi]->id) <<
		       I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT);

	dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;

	dtype_cmd |= add ?
		     I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
		     I40E_TXD_FLTR_QW1_PCMD_SHIFT :
		     I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
		     I40E_TXD_FLTR_QW1_PCMD_SHIFT;

	dtype_cmd |= I40E_TXD_FLTR_QW1_DEST_MASK &
		     (fdata->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT);

	dtype_cmd |= I40E_TXD_FLTR_QW1_FD_STATUS_MASK &
		     (fdata->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT);

	if (fdata->cnt_index) {
		dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
		dtype_cmd |= I40E_TXD_FLTR_QW1_CNTINDEX_MASK &
			     ((u32)fdata->cnt_index <<
			      I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT);
	}

	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
	fdir_desc->rsvd = cpu_to_le32(0);
	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
	fdir_desc->fd_id = cpu_to_le32(fdata->fd_id);
}
#define I40E_FD_CLEAN_DELAY 10
/**
 * i40e_program_fdir_filter - Program a Flow Director filter
 * @fdir_data: Packet data that will be filter parameters
 * @raw_packet: the pre-allocated packet buffer for FDir
 * @pf: The PF pointer
 * @add: True for add/update, False for remove
 **/
static int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data,
				    u8 *raw_packet, struct i40e_pf *pf,
				    bool add)
{
	struct i40e_tx_buffer *tx_buf, *first;
	struct i40e_tx_desc *tx_desc;
	struct i40e_ring *tx_ring;
	struct i40e_vsi *vsi;
	struct device *dev;
	dma_addr_t dma;
	u32 td_cmd = 0;
	u16 i;

	/* find existing FDIR VSI */
	vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
	if (!vsi)
		return -ENOENT;

	tx_ring = vsi->tx_rings[0];
	dev = tx_ring->dev;

	/* we need two descriptors to add/del a filter and we can wait */
	for (i = I40E_FD_CLEAN_DELAY; I40E_DESC_UNUSED(tx_ring) < 2; i--) {
		if (!i)
			return -EAGAIN;
		msleep_interruptible(1);
	}

	dma = dma_map_single(dev, raw_packet,
			     I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		goto dma_fail;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	first = &tx_ring->tx_bi[i];
	i40e_fdir(tx_ring, fdir_data, add);

	/* Now program a dummy descriptor */
	i = tx_ring->next_to_use;
	tx_desc = I40E_TX_DESC(tx_ring, i);
	tx_buf = &tx_ring->tx_bi[i];

	tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;

	memset(tx_buf, 0, sizeof(struct i40e_tx_buffer));

	/* record length, and DMA address */
	dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
	dma_unmap_addr_set(tx_buf, dma, dma);

	tx_desc->buffer_addr = cpu_to_le64(dma);
	td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;

	tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB;
	tx_buf->raw_buf = (void *)raw_packet;

	tx_desc->cmd_type_offset_bsz =
		build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.
	 */
	wmb();

	/* Mark the data descriptor to be watched */
	first->next_to_watch = tx_desc;

	writel(tx_ring->next_to_use, tx_ring->tail);
	return 0;

dma_fail:
	return -1;
}
#define IP_HEADER_OFFSET 14
#define I40E_UDPIP_DUMMY_PACKET_LEN 42
/**
 * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_filter *fd_data,
				   bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct udphdr *udp;
	struct iphdr *ip;
	u8 *raw_packet;
	int ret;
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
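
	/* For illustration: the dummy frame above is 12 zero MAC-address
	 * bytes, ethertype 0x0800 (IPv4), then an IPv4 header with
	 * version/IHL 0x45, total length 0x1c (28 = 20 IP + 8 UDP), TTL
	 * 0x40 and protocol 0x11 (UDP); addresses and ports stay zero and
	 * are filled in from fd_data below.
	 */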
	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
	      + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip;
	udp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip;
	udp->source = fd_data->src_port;

	if (fd_data->flex_filter) {
		u8 *payload = raw_packet + I40E_UDPIP_DUMMY_PACKET_LEN;
		__be16 pattern = fd_data->flex_word;
		u16 off = fd_data->flex_offset;

		*((__force __be16 *)(payload + off)) = pattern;
	}
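
	/* For illustration (values assumed, not from this file): a flex
	 * filter matching 0xabcd at payload byte 4 arrives here with
	 * flex_word = htons(0xabcd) and flex_offset = 4, so the pattern
	 * is written 4 bytes past the 42-byte dummy headers.
	 */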
	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		/* Free the packet buffer since it wasn't added to the ring */
		kfree(raw_packet);
		return -EOPNOTSUPP;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev,
				 "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}

	if (add)
		pf->fd_udp4_filter_cnt++;
	else
		pf->fd_udp4_filter_cnt--;

	return 0;
}
#define I40E_TCPIP_DUMMY_PACKET_LEN 54
/**
 * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_filter *fd_data,
				   bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct tcphdr *tcp;
	struct iphdr *ip;
	u8 *raw_packet;
	int ret;
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
		0x0, 0x72, 0, 0, 0, 0};
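
	/* For illustration: as in the UDP case, this is the 54-byte dummy
	 * frame; IPv4 total length is 0x28 (40 = 20 IP + 20 TCP) and the
	 * protocol is 0x6 (TCP), while the trailing 0x80, 0x11, 0x0, 0x72
	 * bytes occupy the TCP data offset/flags/window fields of the
	 * template.
	 */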
	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
	      + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip;
	tcp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip;
	tcp->source = fd_data->src_port;

	if (fd_data->flex_filter) {
		u8 *payload = raw_packet + I40E_TCPIP_DUMMY_PACKET_LEN;
		__be16 pattern = fd_data->flex_word;
		u16 off = fd_data->flex_offset;

		*((__force __be16 *)(payload + off)) = pattern;
	}

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		/* Free the packet buffer since it wasn't added to the ring */
		kfree(raw_packet);
		return -EOPNOTSUPP;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}
	if (add) {
		pf->fd_tcp4_filter_cnt++;
		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
		    I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
		pf->hw_disabled_flags |= I40E_FLAG_FD_ATR_ENABLED;
	} else {
		pf->fd_tcp4_filter_cnt--;
		if (pf->fd_tcp4_filter_cnt == 0) {
			if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
			    I40E_DEBUG_FD & pf->hw.debug_mask)
				dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
			pf->hw_disabled_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
		}
	}

	return 0;
}
#define I40E_SCTPIP_DUMMY_PACKET_LEN 46
/**
 * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
				    struct i40e_fdir_filter *fd_data,
				    bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct sctphdr *sctp;
	struct iphdr *ip;
	u8 *raw_packet;
	int ret;
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x20, 0, 0, 0x40, 0, 0x40, 0x84, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};

	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_SCTPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	sctp = (struct sctphdr *)(raw_packet + IP_HEADER_OFFSET
	       + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip;
	sctp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip;
	sctp->source = fd_data->src_port;

	if (fd_data->flex_filter) {
		u8 *payload = raw_packet + I40E_SCTPIP_DUMMY_PACKET_LEN;
		__be16 pattern = fd_data->flex_word;
		u16 off = fd_data->flex_offset;

		*((__force __be16 *)(payload + off)) = pattern;
	}

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		/* Free the packet buffer since it wasn't added to the ring */
		kfree(raw_packet);
		return -EOPNOTSUPP;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev,
				 "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}

	if (add)
		pf->fd_sctp4_filter_cnt++;
	else
		pf->fd_sctp4_filter_cnt--;

	return 0;
}
#define I40E_IP_DUMMY_PACKET_LEN 34
/**
 * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
				  struct i40e_fdir_filter *fd_data,
				  bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct iphdr *ip;
	u8 *raw_packet;
	int ret;
	int i;
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0};

	for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
	     i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
		raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
		if (!raw_packet)
			return -ENOMEM;
		memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
		ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);

		ip->saddr = fd_data->src_ip;
		ip->daddr = fd_data->dst_ip;
		ip->protocol = 0;

		if (fd_data->flex_filter) {
			u8 *payload = raw_packet + I40E_IP_DUMMY_PACKET_LEN;
			__be16 pattern = fd_data->flex_word;
			u16 off = fd_data->flex_offset;

			*((__force __be16 *)(payload + off)) = pattern;
		}

		fd_data->pctype = i;
		ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
				 fd_data->pctype, fd_data->fd_id, ret);
			/* The packet buffer wasn't added to the ring so we
			 * need to free it now.
			 */
			kfree(raw_packet);
			return -EOPNOTSUPP;
		} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
			if (add)
				dev_info(&pf->pdev->dev,
					 "Filter OK for PCTYPE %d loc = %d\n",
					 fd_data->pctype, fd_data->fd_id);
			else
				dev_info(&pf->pdev->dev,
					 "Filter deleted for PCTYPE %d loc = %d\n",
					 fd_data->pctype, fd_data->fd_id);
		}
	}

	if (add)
		pf->fd_ip4_filter_cnt++;
	else
		pf->fd_ip4_filter_cnt--;

	return 0;
}
/**
 * i40e_add_del_fdir - Build raw packets to add/del fdir filter
 * @vsi: pointer to the targeted VSI
 * @input: flow director filter data
 * @add: true adds a filter, false removes it
 *
 **/
int i40e_add_del_fdir(struct i40e_vsi *vsi,
		      struct i40e_fdir_filter *input, bool add)
{
	struct i40e_pf *pf = vsi->back;
	int ret;

	switch (input->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
		break;
	case UDP_V4_FLOW:
		ret = i40e_add_del_fdir_udpv4(vsi, input, add);
		break;
	case SCTP_V4_FLOW:
		ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
		break;
	case IP_USER_FLOW:
		switch (input->ip4_proto) {
		case IPPROTO_TCP:
			ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
			break;
		case IPPROTO_UDP:
			ret = i40e_add_del_fdir_udpv4(vsi, input, add);
			break;
		case IPPROTO_SCTP:
			ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
			break;
		case IPPROTO_IP:
			ret = i40e_add_del_fdir_ipv4(vsi, input, add);
			break;
		default:
			/* We cannot support masking based on protocol */
			goto unsupported_flow;
		}
		break;
	default:
unsupported_flow:
		dev_info(&pf->pdev->dev, "Could not specify spec type %d\n",
			 input->flow_type);
		ret = -EINVAL;
	}

	/* The buffer allocated here will normally be freed by
	 * i40e_clean_fdir_tx_irq() as it reclaims resources after transmit
	 * completion. In the event of an error adding the buffer to the FDIR
	 * ring, it will immediately be freed. It may also be freed by
	 * i40e_clean_tx_ring() when closing the VSI.
	 */
	return ret;
}
/**
 * i40e_fd_handle_status - check the Programming Status for FD
 * @rx_ring: the Rx ring for this descriptor
 * @rx_desc: the Rx descriptor for programming Status, not a packet descriptor.
 * @prog_id: the id originally used for programming
 *
 * This is used to verify if the FD programming or invalidation
 * requested by SW to the HW is successful or not and take actions accordingly.
 **/
static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
				  union i40e_rx_desc *rx_desc, u8 prog_id)
{
	struct i40e_pf *pf = rx_ring->vsi->back;
	struct pci_dev *pdev = pf->pdev;
	u32 fcnt_prog, fcnt_avail;
	u32 error;
	u64 qw;

	qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
		I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;

	if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
		pf->fd_inv = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fd_id);
		if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
		    (I40E_DEBUG_FD & pf->hw.debug_mask))
			dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
				 pf->fd_inv);

		/* Check if the programming error is for ATR.
		 * If so, auto disable ATR and set a state for
		 * flush in progress. Next time we come here if flush is in
		 * progress do nothing, once flush is complete the state will
		 * be cleared.
		 */
		if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
			return;

		pf->fd_add_err++;
		/* store the current atr filter count */
		pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);

		if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
		    (pf->hw_disabled_flags & I40E_FLAG_FD_SB_ENABLED)) {
			pf->hw_disabled_flags |= I40E_FLAG_FD_ATR_ENABLED;
			set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
		}

		/* filter programming failed most likely due to table full */
		fcnt_prog = i40e_get_global_fd_count(pf);
		fcnt_avail = pf->fdir_pf_filter_count;
		/* If ATR is running fcnt_prog can quickly change,
		 * if we are very close to full, it makes sense to disable
		 * FD ATR/SB and then re-enable it when there is room.
		 */
		if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
			if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
			    !(pf->hw_disabled_flags &
			      I40E_FLAG_FD_SB_ENABLED)) {
				if (I40E_DEBUG_FD & pf->hw.debug_mask)
					dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
				pf->hw_disabled_flags |=
						I40E_FLAG_FD_SB_ENABLED;
			}
		}
	} else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
				 rx_desc->wb.qword0.hi_dword.fd_id);
	}
}
/**
 * i40e_unmap_and_free_tx_resource - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buffer: the buffer to free
 **/
static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
					    struct i40e_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
			kfree(tx_buffer->raw_buf);
		else
			dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}

	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}
/**
 * i40e_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 **/
void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
{
	unsigned long bi_size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_bi)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);

	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(txring_txq(tx_ring));
}
/**
 * i40e_free_tx_resources - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void i40e_free_tx_resources(struct i40e_ring *tx_ring)
{
	i40e_clean_tx_ring(tx_ring);
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;

	if (tx_ring->desc) {
		dma_free_coherent(tx_ring->dev, tx_ring->size,
				  tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}
/**
 * i40e_get_tx_pending - how many tx descriptors not processed
 * @ring: the ring of descriptors
 * @in_sw: is tx_pending being checked in SW or HW
 *
 * Since there is no access to the ring head register
 * in XL710, we need to use our local copies
 **/
u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw)
{
	u32 head, tail;

	if (!in_sw)
		head = i40e_get_head(ring);
	else
		head = ring->next_to_clean;
	tail = readl(ring->tail);

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);

	return 0;
}
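
/* For illustration: with a 512-descriptor ring, head = 500 and tail = 10,
 * head is not less than tail, so the pending count is
 * 10 + 512 - 500 = 22 descriptors still owned by the hardware.
 */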
#define WB_STRIDE 4

/**
 * i40e_clean_tx_irq - Reclaim resources after transmit completes
 * @vsi: the VSI we care about
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 **/
static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
			      struct i40e_ring *tx_ring, int napi_budget)
{
	u16 i = tx_ring->next_to_clean;
	struct i40e_tx_buffer *tx_buf;
	struct i40e_tx_desc *tx_head;
	struct i40e_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = vsi->work_limit;

	tx_buf = &tx_ring->tx_bi[i];
	tx_desc = I40E_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));

	do {
		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* we have caught up to head, no work left to do */
		if (tx_head == tx_desc)
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_packets += tx_buf->gso_segs;

		/* free the skb */
		napi_consume_skb(tx_buf->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_bi;
				tx_desc = I40E_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = I40E_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	tx_ring->q_vector->tx.total_bytes += total_bytes;
	tx_ring->q_vector->tx.total_packets += total_packets;

	if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
		/* check to see if there are < 4 descriptors
		 * waiting to be written back, then kick the hardware to force
		 * them to be written back in case we stay in NAPI.
		 * In this mode on X722 we do not enable Interrupt.
		 */
		unsigned int j = i40e_get_tx_pending(tx_ring, false);

		if (budget &&
		    ((j / WB_STRIDE) == 0) && (j > 0) &&
		    !test_bit(__I40E_DOWN, &vsi->state) &&
		    (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
			tx_ring->arm_wb = true;
	}

	/* notify netdev of completed buffers */
	netdev_tx_completed_queue(txring_txq(tx_ring),
				  total_packets, total_bytes);

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__I40E_DOWN, &vsi->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}
/**
 * i40e_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to enable writeback
 *
 **/
static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
				  struct i40e_q_vector *q_vector)
{
	u16 flags = q_vector->tx.ring[0].flags;
	u32 val;

	if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR))
		return;

	if (q_vector->arm_wb_state)
		return;

	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
		val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK |
		      I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */

		wr32(&vsi->back->hw,
		     I40E_PFINT_DYN_CTLN(q_vector->v_idx + vsi->base_vector - 1),
		     val);
	} else {
		val = I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK |
		      I40E_PFINT_DYN_CTL0_ITR_INDX_MASK; /* set noitr */

		wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
	}
	q_vector->arm_wb_state = true;
}
/**
 * i40e_force_wb - Issue SW Interrupt so HW does a wb
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to force writeback
 *
 **/
void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
{
	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
		u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
			  I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
			  I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
			  I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
			  /* allow 00 to be written to the index */

		wr32(&vsi->back->hw,
		     I40E_PFINT_DYN_CTLN(q_vector->v_idx +
					 vsi->base_vector - 1), val);
	} else {
		u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
			  I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
			  I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
			  I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK;
			  /* allow 00 to be written to the index */

		wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
	}
}
/**
 * i40e_set_new_dynamic_itr - Find new ITR level
 * @rc: structure containing ring performance data
 *
 * Returns true if ITR changed, false if not
 *
 * Stores a new ITR value based on packets and byte counts during
 * the last interrupt.  The advantage of per interrupt computation
 * is faster updates and more accurate ITR for the current traffic
 * pattern.  Constants in this function were computed based on
 * theoretical maximum wire speed and thresholds were set based on
 * testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
{
	enum i40e_latency_range new_latency_range = rc->latency_range;
	struct i40e_q_vector *qv = rc->ring->q_vector;
	u32 new_itr = rc->itr;
	int bytes_per_int;
	int usecs;

	if (rc->total_packets == 0 || !rc->itr)
		return false;

	/* simple throttlerate management
	 *   0-10MB/s   lowest (50000 ints/s)
	 *  10-20MB/s   low    (20000 ints/s)
	 *  20-1249MB/s bulk   (18000 ints/s)
	 *  > 40000 Rx packets per second (8000 ints/s)
	 *
	 * The math works out because the divisor is in 10^(-6) which
	 * turns the bytes/us input value into MB/s values, but
	 * make sure to use usecs, as the register values written
	 * are in 2 usec increments in the ITR registers, and make sure
	 * to use the smoothed values that the countdown timer gives us.
	 */
	usecs = (rc->itr << 1) * ITR_COUNTDOWN_START;
	bytes_per_int = rc->total_bytes / usecs;
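
	/* For illustration (assuming ITR_COUNTDOWN_START is 100 as defined
	 * in i40e.h): rc->itr = 62 gives usecs = (62 << 1) * 100 = 12400;
	 * 1,000,000 bytes over that window yields bytes_per_int = 80,
	 * i.e. roughly 80 MB/s, which lands in the bulk latency range in
	 * the switch below.
	 */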
	switch (new_latency_range) {
	case I40E_LOWEST_LATENCY:
		if (bytes_per_int > 10)
			new_latency_range = I40E_LOW_LATENCY;
		break;
	case I40E_LOW_LATENCY:
		if (bytes_per_int > 20)
			new_latency_range = I40E_BULK_LATENCY;
		else if (bytes_per_int <= 10)
			new_latency_range = I40E_LOWEST_LATENCY;
		break;
	case I40E_BULK_LATENCY:
	case I40E_ULTRA_LATENCY:
	default:
		if (bytes_per_int <= 20)
			new_latency_range = I40E_LOW_LATENCY;
		break;
	}

	/* this is to adjust RX more aggressively when streaming small
	 * packets.  The value of 40000 was picked as it is just beyond
	 * what the hardware can receive per second if in low latency
	 * mode.
	 */
#define RX_ULTRA_PACKET_RATE 40000

	if ((((rc->total_packets * 1000000) / usecs) > RX_ULTRA_PACKET_RATE) &&
	    (&qv->rx == rc))
		new_latency_range = I40E_ULTRA_LATENCY;

	rc->latency_range = new_latency_range;

	switch (new_latency_range) {
	case I40E_LOWEST_LATENCY:
		new_itr = I40E_ITR_50K;
		break;
	case I40E_LOW_LATENCY:
		new_itr = I40E_ITR_20K;
		break;
	case I40E_BULK_LATENCY:
		new_itr = I40E_ITR_18K;
		break;
	case I40E_ULTRA_LATENCY:
		new_itr = I40E_ITR_8K;
		break;
	default:
		break;
	}

	rc->total_bytes = 0;
	rc->total_packets = 0;

	if (new_itr != rc->itr) {
		rc->itr = new_itr;
		return true;
	}

	return false;
}
/**
 * i40e_clean_programming_status - clean the programming status descriptor
 * @rx_ring: the rx ring that has this descriptor
 * @rx_desc: the rx descriptor written back by HW
 *
 * Flow director should handle FD_FILTER_STATUS to check its filter programming
 * status being successful or not and take actions accordingly. FCoE should
 * handle its context/filter programming/invalidation status and take actions.
 *
 **/
static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
					  union i40e_rx_desc *rx_desc)
{
	u64 qw;
	u8 id;

	qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
	     I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;

	if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
		i40e_fd_handle_status(rx_ring, rx_desc, id);
}
/**
 * i40e_setup_tx_descriptors - Allocate the Tx descriptors
 * @tx_ring: the tx ring to set up
 *
 * Return 0 on success, negative on error
 **/
int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int bi_size;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(tx_ring->tx_bi);
	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!tx_ring->tx_bi)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
	/* add u32 for head writeback, align after this takes care of
	 * guaranteeing this is at least one cache line in size
	 */
	tx_ring->size += sizeof(u32);
	tx_ring->size = ALIGN(tx_ring->size, 4096);
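
	/* For illustration: a 512-descriptor ring needs 512 * 16 = 8192
	 * bytes plus the 4-byte head write-back word, and ALIGN(8196, 4096)
	 * rounds the allocation up to 12288 bytes.
	 */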
	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			 tx_ring->size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	return 0;

err:
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;
	return -ENOMEM;
}
/**
 * i40e_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 **/
void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
{
	unsigned long bi_size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_bi)
		return;

	if (rx_ring->skb) {
		dev_kfree_skb(rx_ring->skb);
		rx_ring->skb = NULL;
	}

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];

		if (!rx_bi->page)
			continue;

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(rx_ring->dev,
					      rx_bi->dma,
					      rx_bi->page_offset,
					      I40E_RXBUFFER_2048,
					      DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
				     PAGE_SIZE,
				     DMA_FROM_DEVICE,
				     I40E_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);

		rx_bi->page = NULL;
		rx_bi->page_offset = 0;
	}

	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}
/**
 * i40e_free_rx_resources - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void i40e_free_rx_resources(struct i40e_ring *rx_ring)
{
	i40e_clean_rx_ring(rx_ring);
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;

	if (rx_ring->desc) {
		dma_free_coherent(rx_ring->dev, rx_ring->size,
				  rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}
/**
 * i40e_setup_rx_descriptors - Allocate Rx descriptors
 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int bi_size;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(rx_ring->rx_bi);
	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
	rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!rx_ring->rx_bi)
		goto err;

	u64_stats_init(&rx_ring->syncp);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);
	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			 rx_ring->size);
		goto err;
	}

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;
err:
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;
	return -ENOMEM;
}
/**
 * i40e_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 **/
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;

	/* update next to alloc since we have filled the ring */
	rx_ring->next_to_alloc = val;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}
/**
 * i40e_alloc_mapped_page - recycle or make a new page
 * @rx_ring: ring to use
 * @bi: rx_buffer struct to modify
 *
 * Returns true if the page was successfully allocated or
 * reused.
 **/
static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
				   struct i40e_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page)) {
		rx_ring->rx_stats.page_reuse_count++;
		return true;
	}

	/* alloc new page for storage */
	page = dev_alloc_page();
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
				 PAGE_SIZE,
				 DMA_FROM_DEVICE,
				 I40E_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, 0);
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = 0;

	/* initialize pagecnt_bias to 1 representing we fully own page */
	bi->pagecnt_bias = 1;

	return true;
}
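
/* For illustration: pagecnt_bias lets the hot path avoid atomic page-count
 * updates. The driver starts out owning the page with bias 1; each buffer
 * handed to the stack decrements the bias instead of the page count, so
 * page_count(page) - pagecnt_bias tells how many references the stack
 * still holds (see i40e_can_reuse_rx_page() below).
 */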
/**
 * i40e_receive_skb - Send a completed packet up the stack
 * @rx_ring: rx ring in play
 * @skb: packet to send up
 * @vlan_tag: vlan tag for packet
 **/
static void i40e_receive_skb(struct i40e_ring *rx_ring,
			     struct sk_buff *skb, u16 vlan_tag)
{
	struct i40e_q_vector *q_vector = rx_ring->q_vector;

	if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (vlan_tag & VLAN_VID_MASK))
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);

	napi_gro_receive(&q_vector->napi, skb);
}
/**
 * i40e_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns false if all allocations were successful, true if any fail
 **/
bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
{
	u16 ntu = rx_ring->next_to_use;
	union i40e_rx_desc *rx_desc;
	struct i40e_rx_buffer *bi;

	/* do nothing if no valid netdev defined */
	if (!rx_ring->netdev || !cleaned_count)
		return false;

	rx_desc = I40E_RX_DESC(rx_ring, ntu);
	bi = &rx_ring->rx_bi[ntu];

	do {
		if (!i40e_alloc_mapped_page(rx_ring, bi))
			goto no_buffers;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset,
						 I40E_RXBUFFER_2048,
						 DMA_FROM_DEVICE);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		ntu++;
		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = I40E_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_bi;
			ntu = 0;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->wb.qword1.status_error_len = 0;

		cleaned_count--;
	} while (cleaned_count);

	if (rx_ring->next_to_use != ntu)
		i40e_release_rx_desc(rx_ring, ntu);

	return false;

no_buffers:
	if (rx_ring->next_to_use != ntu)
		i40e_release_rx_desc(rx_ring, ntu);

	/* make sure to come back via polling to try again after
	 * allocation failure
	 */
	return true;
}
/**
 * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
 * @vsi: the VSI we care about
 * @skb: skb currently being received and modified
 * @rx_desc: the receive descriptor
 **/
static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
				    struct sk_buff *skb,
				    union i40e_rx_desc *rx_desc)
{
	struct i40e_rx_ptype_decoded decoded;
	u32 rx_error, rx_status;
	bool ipv4, ipv6;
	u8 ptype;
	u64 qword;

	qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
	rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
		   I40E_RXD_QW1_ERROR_SHIFT;
	rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
		    I40E_RXD_QW1_STATUS_SHIFT;
	decoded = decode_rx_desc_ptype(ptype);

	skb->ip_summed = CHECKSUM_NONE;

	skb_checksum_none_assert(skb);

	/* Rx csum enabled and ip headers found? */
	if (!(vsi->netdev->features & NETIF_F_RXCSUM))
		return;

	/* did the hardware decode the packet and checksum? */
	if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
		return;

	/* both known and outer_ip must be set for the below code to work */
	if (!(decoded.known && decoded.outer_ip))
		return;

	ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4);
	ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6);

	if (ipv4 &&
	    (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
			 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
		goto checksum_fail;

	/* likely incorrect csum if alternate IP extension headers found */
	if (ipv6 &&
	    rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
		/* don't increment checksum err here, non-fatal err */
		return;

	/* there was some L4 error, count error and punt packet to the stack */
	if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
		goto checksum_fail;

	/* handle packets that were not able to be checksummed due
	 * to arrival speed, in this case the stack can compute
	 * the csum.
	 */
	if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
		return;

	/* If there is an outer header present that might contain a checksum
	 * we need to bump the checksum level by 1 to reflect the fact that
	 * we are indicating we validated the inner checksum.
	 */
	if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
		skb->csum_level = 1;

	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
	switch (decoded.inner_prot) {
	case I40E_RX_PTYPE_INNER_PROT_TCP:
	case I40E_RX_PTYPE_INNER_PROT_UDP:
	case I40E_RX_PTYPE_INNER_PROT_SCTP:
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		/* fall through */
	default:
		break;
	}

	return;

checksum_fail:
	vsi->back->hw_csum_rx_error++;
}
/**
 * i40e_ptype_to_htype - get a hash type
 * @ptype: the ptype value from the descriptor
 *
 * Returns a hash type to be used by skb_set_hash
 **/
static inline int i40e_ptype_to_htype(u8 ptype)
{
	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);

	if (!decoded.known)
		return PKT_HASH_TYPE_NONE;

	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
	    decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
		return PKT_HASH_TYPE_L4;
	else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
		 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
		return PKT_HASH_TYPE_L3;
	else
		return PKT_HASH_TYPE_L2;
}
/**
 * i40e_rx_hash - set the hash value in the skb
 * @ring: descriptor ring
 * @rx_desc: specific descriptor
 * @skb: skb currently being received and modified
 * @rx_ptype: Rx packet type
 **/
static inline void i40e_rx_hash(struct i40e_ring *ring,
				union i40e_rx_desc *rx_desc,
				struct sk_buff *skb,
				u8 rx_ptype)
{
	u32 hash;
	const __le64 rss_mask =
		cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
			    I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);

	if (!(ring->netdev->features & NETIF_F_RXHASH))
		return;

	if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
		hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
		skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
	}
}
/**
 * i40e_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 * @rx_ptype: the packet type decoded by hardware
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, protocol, and
 * other fields within the skb.
 **/
static inline
void i40e_process_skb_fields(struct i40e_ring *rx_ring,
			     union i40e_rx_desc *rx_desc, struct sk_buff *skb,
			     u8 rx_ptype)
{
	u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
			I40E_RXD_QW1_STATUS_SHIFT;
	u32 tsynvalid = rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK;
	u32 tsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
		   I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT;

	if (unlikely(tsynvalid))
		i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn);

	i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);

	i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);

	skb_record_rx_queue(skb, rx_ring->queue_index);

	/* modifies the skb - consumes the enet header */
	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}
/**
 * i40e_cleanup_headers - Correct empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @skb: pointer to current skb being fixed
 *
 * Also address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 **/
static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb)
{
	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}
/**
 * i40e_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 **/
static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
			       struct i40e_rx_buffer *old_buff)
{
	struct i40e_rx_buffer *new_buff;
	u16 nta = rx_ring->next_to_alloc;

	new_buff = &rx_ring->rx_bi[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* transfer page from old buffer to new buffer */
	new_buff->dma = old_buff->dma;
	new_buff->page = old_buff->page;
	new_buff->page_offset = old_buff->page_offset;
	new_buff->pagecnt_bias = old_buff->pagecnt_bias;
}
/**
 * i40e_page_is_reusable - check if any reuse is possible
 * @page: page struct to check
 *
 * A page is not reusable if it was allocated under low memory
 * conditions, or it's not in the same NUMA node as this CPU.
 **/
static inline bool i40e_page_is_reusable(struct page *page)
{
	return (page_to_nid(page) == numa_mem_id()) &&
		!page_is_pfmemalloc(page);
}
/**
 * i40e_can_reuse_rx_page - Determine if this page can be reused by
 * the adapter for another receive
 *
 * @rx_buffer: buffer containing the page
 *
 * If page is reusable, rx_buffer->page_offset is adjusted to point to
 * an unused region in the page.
 *
 * For small pages, @truesize will be a constant value, half the size
 * of the memory at page.  We'll attempt to alternate between high and
 * low halves of the page, with one half ready for use by the hardware
 * and the other half being consumed by the stack.  We use the page
 * ref count to determine whether the stack has finished consuming the
 * portion of this page that was passed up with a previous packet.  If
 * the page ref count is >1, we'll assume the "other" half page is
 * still busy, and this page cannot be reused.
 *
 * For larger pages, @truesize will be the actual space used by the
 * received packet (adjusted upward to an even multiple of the cache
 * line size).  This will advance through the page by the amount
 * actually consumed by the received packets while there is still
 * space for a buffer.  Each region of larger pages will be used at
 * most once, after which the page will not be reused.
 *
 * In either case, if the page is reusable its refcount is increased.
 **/
static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
{
#if (PAGE_SIZE >= 8192)
	unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048;
#endif
	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
	struct page *page = rx_buffer->page;

	/* Is any reuse possible? */
	if (unlikely(!i40e_page_is_reusable(page)))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely((page_count(page) - pagecnt_bias) > 1))
		return false;
#else
	if (rx_buffer->page_offset > last_offset)
		return false;
#endif

	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */
	if (unlikely(!pagecnt_bias)) {
		page_ref_add(page, USHRT_MAX);
		rx_buffer->pagecnt_bias = USHRT_MAX;
	}

	return true;
}
/**
 * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: packet length from rx_desc
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 * It will just attach the page as a frag to the skb.
 *
 * The function will then update the page offset.
 **/
static void i40e_add_rx_frag(struct i40e_ring *rx_ring,
			     struct i40e_rx_buffer *rx_buffer,
			     struct sk_buff *skb,
			     unsigned int size)
{
#if (PAGE_SIZE < 8192)
	unsigned int truesize = I40E_RXBUFFER_2048;
#else
	unsigned int truesize = SKB_DATA_ALIGN(size);
#endif

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
			rx_buffer->page_offset, size, truesize);

	/* page is being used so we must update the page offset */
#if (PAGE_SIZE < 8192)
	rx_buffer->page_offset ^= truesize;
#else
	rx_buffer->page_offset += truesize;
#endif
}
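
/* For illustration: with 4K pages truesize is fixed at 2048, so the XOR
 * above flips page_offset between 0 and 2048, alternating the two halves
 * of the page between hardware and stack ownership.
 */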
/**
 * i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use
 * @rx_ring: rx descriptor ring to transact packets on
 * @size: size of buffer to add to skb
 *
 * This function will pull an Rx buffer from the ring and synchronize it
 * for use by the CPU.
 **/
static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
						 const unsigned int size)
{
	struct i40e_rx_buffer *rx_buffer;

	rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
	prefetchw(rx_buffer->page);

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      rx_buffer->dma,
				      rx_buffer->page_offset,
				      size,
				      DMA_FROM_DEVICE);

	/* We have pulled a buffer for use, so decrement pagecnt_bias */
	rx_buffer->pagecnt_bias--;

	return rx_buffer;
}
/**
 * i40e_construct_skb - Allocate skb and populate it
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: rx buffer to pull data from
 * @size: size of buffer to add to skb
 *
 * This function allocates an skb.  It then populates it with the page
 * data from the current receive descriptor, taking care to set up the
 * skb correctly.
 **/
static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
					  struct i40e_rx_buffer *rx_buffer,
					  unsigned int size)
{
	void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = I40E_RXBUFFER_2048;
#else
	unsigned int truesize = SKB_DATA_ALIGN(size);
#endif
	unsigned int headlen;
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	prefetch(va);
#if L1_CACHE_BYTES < 128
	prefetch(va + L1_CACHE_BYTES);
#endif

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
			       I40E_RX_HDR_SIZE,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	/* Determine available headroom for copy */
	headlen = size;
	if (headlen > I40E_RX_HDR_SIZE)
		headlen = eth_get_headlen(va, I40E_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));

	/* update all of the pointers */
	size -= headlen;
	if (size) {
		skb_add_rx_frag(skb, 0, rx_buffer->page,
				rx_buffer->page_offset + headlen,
				size, truesize);

		/* buffer is used by skb, update page_offset */
#if (PAGE_SIZE < 8192)
		rx_buffer->page_offset ^= truesize;
#else
		rx_buffer->page_offset += truesize;
#endif
	} else {
		/* buffer is unused, reset bias back to rx_buffer */
		rx_buffer->pagecnt_bias++;
	}

	return skb;
}
/**
 * i40e_put_rx_buffer - Clean up used buffer and either recycle or free
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: rx buffer to pull data from
 *
 * This function will clean up the contents of the rx_buffer.  It will
 * either recycle the buffer or unmap it and free the associated resources.
 **/
static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
			       struct i40e_rx_buffer *rx_buffer)
{
	if (i40e_can_reuse_rx_page(rx_buffer)) {
		/* hand second half of page back to the ring */
		i40e_reuse_rx_page(rx_ring, rx_buffer);
		rx_ring->rx_stats.page_reuse_count++;
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, PAGE_SIZE,
				     DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buffer->page,
					rx_buffer->pagecnt_bias);
	}

	/* clear contents of buffer_info */
	rx_buffer->page = NULL;
}
/**
 * i40e_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 * @skb: Current socket buffer containing buffer in progress
 *
 * This function updates next to clean.  If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 **/
static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
			    union i40e_rx_desc *rx_desc,
			    struct sk_buff *skb)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(I40E_RX_DESC(rx_ring, ntc));

#define staterrlen rx_desc->wb.qword1.status_error_len
	if (unlikely(i40e_rx_is_programming_status(le64_to_cpu(staterrlen)))) {
		i40e_clean_programming_status(rx_ring, rx_desc);
		return true;
	}
	/* if we are the last buffer then there is nothing else to do */
#define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)
	if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
		return false;

	rx_ring->rx_stats.non_eop_descs++;

	return true;
}
/**
 * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
 * @rx_ring: rx descriptor ring to transact packets on
 * @budget: Total limit on number of packets to process
 *
 * This function provides a "bounce buffer" approach to Rx interrupt
 * processing.  The advantage to this is that on systems that have
 * expensive overhead for IOMMU access this provides a means of avoiding
 * it by maintaining the mapping of the page to the system.
 *
 * Returns amount of work completed
 **/
static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	struct sk_buff *skb = rx_ring->skb;
	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
	bool failure = false;

	while (likely(total_rx_packets < budget)) {
		struct i40e_rx_buffer *rx_buffer;
		union i40e_rx_desc *rx_desc;
		unsigned int size;
		u16 vlan_tag;
		u8 rx_ptype;
		u64 qword;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
			failure = failure ||
				  i40e_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);

		/* status_error_len will always be zero for unused descriptors
		 * because it's cleared in cleanup, and overlaps with hdr_addr
		 * which is always zero because packet split isn't used, if the
		 * hardware wrote DD then the length will be non-zero
		 */
		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
		size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
		       I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
		if (!size)
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we have
		 * verified the descriptor has been written back.
		 */
		dma_rmb();

		rx_buffer = i40e_get_rx_buffer(rx_ring, size);

		/* retrieve a buffer from the ring */
		if (skb)
			i40e_add_rx_frag(rx_ring, rx_buffer, skb, size);
		else
			skb = i40e_construct_skb(rx_ring, rx_buffer, size);

		/* exit if we failed to retrieve a buffer */
		if (!skb) {
			rx_ring->rx_stats.alloc_buff_failed++;
			rx_buffer->pagecnt_bias++;
			break;
		}

		i40e_put_rx_buffer(rx_ring, rx_buffer);
		cleaned_count++;

		if (i40e_is_non_eop(rx_ring, rx_desc, skb))
			continue;

		/* ERR_MASK will only have valid bits if EOP set, and
		 * what we are doing here is actually checking
		 * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
		 * the error field
		 */
		if (unlikely(i40e_test_staterr(rx_desc,
					       BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
			dev_kfree_skb_any(skb);
			skb = NULL;
			continue;
		}

		if (i40e_cleanup_headers(rx_ring, skb)) {
			skb = NULL;
			continue;
		}

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;

		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
		rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
			   I40E_RXD_QW1_PTYPE_SHIFT;

		/* populate checksum, VLAN, and protocol */
		i40e_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);

		vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
			   le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;

		i40e_receive_skb(rx_ring, skb, vlan_tag);
		skb = NULL;

		/* update budget accounting */
		total_rx_packets++;
	}

	rx_ring->skb = skb;

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	rx_ring->q_vector->rx.total_packets += total_rx_packets;
	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;

	/* guarantee a trip back through this routine if there was a failure */
	return failure ? budget : total_rx_packets;
}
static u32 i40e_buildreg_itr(const int type, const u16 itr)
{
	u32 val;

	val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
	      /* Don't clear PBA because that can cause lost interrupts that
	       * came in while we were cleaning/polling
	       */
	      (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
	      (itr << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);

	return val;
}
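
/* For illustration: i40e_buildreg_itr(I40E_RX_ITR, 40) sets INTENA, selects
 * the Rx ITR index, and programs an interval of 40 in the register's
 * 2-usec units, i.e. 80 usec or roughly 12500 interrupts per second.
 */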
/* a small macro to shorten up some long lines */
#define INTREG I40E_PFINT_DYN_CTLN
static inline int get_rx_itr(struct i40e_vsi *vsi, int idx)
{
	return vsi->rx_rings[idx]->rx_itr_setting;
}

static inline int get_tx_itr(struct i40e_vsi *vsi, int idx)
{
	return vsi->tx_rings[idx]->tx_itr_setting;
}
2025 * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
2026 * @vsi: the VSI we care about
2027 * @q_vector: q_vector for which itr is being updated and interrupt enabled
2030 static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
2031 struct i40e_q_vector *q_vector)
2033 struct i40e_hw *hw = &vsi->back->hw;
2034 bool rx = false, tx = false;
2037 int idx = q_vector->v_idx;
2038 int rx_itr_setting, tx_itr_setting;
2040 vector = (q_vector->v_idx + vsi->base_vector);
2042 /* avoid dynamic calculation if in countdown mode OR if
2043 * all dynamic is disabled
2045 rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
2047 rx_itr_setting = get_rx_itr(vsi, idx);
2048 tx_itr_setting = get_tx_itr(vsi, idx);
2050 if (q_vector->itr_countdown > 0 ||
2051 (!ITR_IS_DYNAMIC(rx_itr_setting) &&
2052 !ITR_IS_DYNAMIC(tx_itr_setting))) {
2056 if (ITR_IS_DYNAMIC(tx_itr_setting)) {
2057 rx = i40e_set_new_dynamic_itr(&q_vector->rx);
2058 rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
2061 if (ITR_IS_DYNAMIC(tx_itr_setting)) {
2062 tx = i40e_set_new_dynamic_itr(&q_vector->tx);
2063 txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr);
2067 /* get the higher of the two ITR adjustments and
2068 * use the same value for both ITR registers
2069 * when in adaptive mode (Rx and/or Tx)
2071 u16 itr = max(q_vector->tx.itr, q_vector->rx.itr);
2073 q_vector->tx.itr = q_vector->rx.itr = itr;
2074 txval = i40e_buildreg_itr(I40E_TX_ITR, itr);
2076 rxval = i40e_buildreg_itr(I40E_RX_ITR, itr);
2080 /* only need to enable the interrupt once, but need
2081 * to possibly update both ITR values
2084 /* set the INTENA_MSK_MASK so that this first write
2085 * won't actually enable the interrupt, instead just
2086 * updating the ITR (it's bit 31 PF and VF)
2089 /* don't check _DOWN because interrupt isn't being enabled */
2090 wr32(hw, INTREG(vector - 1), rxval);
2094 if (!test_bit(__I40E_DOWN, &vsi->state))
2095 wr32(hw, INTREG(vector - 1), txval);
2097 if (q_vector->itr_countdown)
2098 q_vector->itr_countdown--;
2100 q_vector->itr_countdown = ITR_COUNTDOWN_START;
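/* The countdown rate-limits the dynamic ITR algorithm: a new value is
 * computed only once itr_countdown has decremented to zero, after which
 * it is reloaded with ITR_COUNTDOWN_START, so the hardware runs at each
 * new rate for a while before being re-measured.
 */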
2104 * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
2105 * @napi: napi struct with our device's info in it
2106 * @budget: amount of work driver is allowed to do this pass, in packets
2108 * This function will clean all queues associated with a q_vector.
2110 * Returns the amount of work done
2112 int i40e_napi_poll(struct napi_struct *napi, int budget)
2114 struct i40e_q_vector *q_vector =
2115 container_of(napi, struct i40e_q_vector, napi);
2116 struct i40e_vsi *vsi = q_vector->vsi;
2117 struct i40e_ring *ring;
2118 bool clean_complete = true;
2119 bool arm_wb = false;
2120 int budget_per_ring;
2123 if (test_bit(__I40E_DOWN, &vsi->state)) {
2124 napi_complete(napi);
2128 /* Clear hung_detected bit */
2129 clear_bit(I40E_Q_VECTOR_HUNG_DETECT, &q_vector->hung_detected);
2130 /* Since the actual Tx work is minimal, we can give the Tx a larger
2131 * budget and be more aggressive about cleaning up the Tx descriptors.
2133 i40e_for_each_ring(ring, q_vector->tx) {
2134 if (!i40e_clean_tx_irq(vsi, ring, budget)) {
2135 clean_complete = false;
2138 arm_wb |= ring->arm_wb;
2139 ring->arm_wb = false;
2142 /* Handle case where we are called by netpoll with a budget of 0 */
2146 /* We attempt to distribute budget to each Rx queue fairly, but don't
2147 * allow the budget to go below 1 because that would exit polling early.
2149 budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
2151 i40e_for_each_ring(ring, q_vector->rx) {
2152 int cleaned = i40e_clean_rx_irq(ring, budget_per_ring);
2154 work_done += cleaned;
2155 /* if we clean as many as budgeted, we must not be done */
2156 if (cleaned >= budget_per_ring)
2157 clean_complete = false;
2160 /* If work not completed, return budget and polling will return */
2161 if (!clean_complete) {
2162 const cpumask_t *aff_mask = &q_vector->affinity_mask;
2163 int cpu_id = smp_processor_id();
2165 /* It is possible that the interrupt affinity has changed but,
2166 * if the cpu is pegged at 100%, polling will never exit while
2167 * traffic continues and the interrupt will be stuck on this
2168 * cpu. We check to make sure affinity is correct before we
2169 * continue to poll, otherwise we must stop polling so the
2170 * interrupt can move to the correct cpu.
2172 if (likely(cpumask_test_cpu(cpu_id, aff_mask) ||
2173 !(vsi->back->flags & I40E_FLAG_MSIX_ENABLED))) {
2176 q_vector->tx.ring[0].tx_stats.tx_force_wb++;
2177 i40e_enable_wb_on_itr(vsi, q_vector);
2183 if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
2184 q_vector->arm_wb_state = false;
2186 /* Work is done so exit the polling mode and re-enable the interrupt */
2187 napi_complete_done(napi, work_done);
2189 /* If we're prematurely stopping polling to fix the interrupt
2190 * affinity, we want to make sure polling starts back up, so we
2191 * issue a call to i40e_force_wb which triggers a SW interrupt.
2193 if (!clean_complete)
2194 i40e_force_wb(vsi, q_vector);
2195 else if (!(vsi->back->flags & I40E_FLAG_MSIX_ENABLED))
2196 i40e_irq_dynamic_enable_icr0(vsi->back, false);
2198 i40e_update_enable_itr(vsi, q_vector);
2200 return min(work_done, budget - 1);
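/* The min(work_done, budget - 1) matters: napi_complete_done() has
 * already been called on this path, and a poll routine that has completed
 * NAPI must not return the full budget, since that tells the core to keep
 * polling. Capping at budget - 1 keeps the two in agreement even when Rx
 * cleanup consumed every packet it was allowed.
 */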
2204 * i40e_atr - Add a Flow Director ATR filter
2205 * @tx_ring: ring to add programming descriptor to
2207 * @tx_flags: send tx flags
2209 static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
2212 struct i40e_filter_program_desc *fdir_desc;
2213 struct i40e_pf *pf = tx_ring->vsi->back;
2215 unsigned char *network;
2217 struct ipv6hdr *ipv6;
2221 u32 flex_ptype, dtype_cmd;
2225 /* make sure ATR is enabled */
2226 if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
2229 if ((pf->hw_disabled_flags & I40E_FLAG_FD_ATR_ENABLED))
2232 /* if sampling is disabled do nothing */
2233 if (!tx_ring->atr_sample_rate)
2236 /* Currently only IPv4/IPv6 with TCP is supported */
2237 if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
2240 /* snag network header to get L4 type and address */
2241 hdr.network = (tx_flags & I40E_TX_FLAGS_UDP_TUNNEL) ?
2242 skb_inner_network_header(skb) : skb_network_header(skb);
2244 /* Note: tx_flags gets modified to reflect inner protocols in the
2245 * i40e_tx_enable_csum() function if encapsulation is enabled.
2247 if (tx_flags & I40E_TX_FLAGS_IPV4) {
2248 /* access ihl as u8 to avoid unaligned access on ia64 */
2249 hlen = (hdr.network[0] & 0x0F) << 2;
2250 l4_proto = hdr.ipv4->protocol;
2252 hlen = hdr.network - skb->data;
2253 l4_proto = ipv6_find_hdr(skb, &hlen, IPPROTO_TCP, NULL, NULL);
2254 hlen -= hdr.network - skb->data;
2257 if (l4_proto != IPPROTO_TCP)
2260 th = (struct tcphdr *)(hdr.network + hlen);
2262 /* Due to lack of space, no more new filters can be programmed */
2263 if (th->syn && (pf->hw_disabled_flags & I40E_FLAG_FD_ATR_ENABLED))
2265 if ((pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) &&
2266 (!(pf->hw_disabled_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE))) {
2267 /* HW ATR eviction will take care of removing filters on FIN and RST packets */
2270 if (th->fin || th->rst)
2274 tx_ring->atr_count++;
2276 /* sample on all syn/fin/rst packets or once every atr sample rate */
2280 (tx_ring->atr_count < tx_ring->atr_sample_rate))
2283 tx_ring->atr_count = 0;
2285 /* grab the next descriptor */
2286 i = tx_ring->next_to_use;
2287 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
2290 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2292 flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
2293 I40E_TXD_FLTR_QW0_QINDEX_MASK;
2294 flex_ptype |= (tx_flags & I40E_TX_FLAGS_IPV4) ?
2295 (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
2296 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
2297 (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
2298 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
2300 flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
2302 dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
2304 dtype_cmd |= (th->fin || th->rst) ?
2305 (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
2306 I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
2307 (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
2308 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
2310 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
2311 I40E_TXD_FLTR_QW1_DEST_SHIFT;
2313 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
2314 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
2316 dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
2317 if (!(tx_flags & I40E_TX_FLAGS_UDP_TUNNEL))
2319 ((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
2320 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2321 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
2324 ((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
2325 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2326 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
2328 if ((pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) &&
2329 (!(pf->hw_disabled_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)))
2330 dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
2332 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
2333 fdir_desc->rsvd = cpu_to_le32(0);
2334 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
2335 fdir_desc->fd_id = cpu_to_le32(0);
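/* ATR filters are maintained by the traffic itself: each sampled packet
 * on an active flow re-adds (refreshes) the same filter, while a sampled
 * FIN or RST selects the REMOVE command above so the filter is torn down
 * as the flow ends.
 */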
2339 * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
2341 * @tx_ring: ring to send buffer on
2342 * @flags: the tx flags to be set
2344 * Checks the skb and sets up the generic transmit flags related to
2345 * VLAN tagging for the HW, such as VLAN and DCB.
2347 * Returns an error code if the frame should be dropped, otherwise
2348 * returns 0 to indicate the flags have been set properly.
2350 static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
2351 struct i40e_ring *tx_ring,
2354 __be16 protocol = skb->protocol;
2357 if (protocol == htons(ETH_P_8021Q) &&
2358 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
2359 /* When HW VLAN acceleration is turned off by the user the
2360 * stack sets the protocol to 8021q so that the driver
2361 * can take any steps required to support the SW only
2362 * VLAN handling. In our case the driver doesn't need
2363 * to take any further steps so just set the protocol
2364 * to the encapsulated ethertype.
2366 skb->protocol = vlan_get_protocol(skb);
2370 /* if we have a HW VLAN tag being added, default to the HW one */
2371 if (skb_vlan_tag_present(skb)) {
2372 tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
2373 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
2374 /* else if it is a SW VLAN, check the next protocol and store the tag */
2375 } else if (protocol == htons(ETH_P_8021Q)) {
2376 struct vlan_hdr *vhdr, _vhdr;
2378 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
2382 protocol = vhdr->h_vlan_encapsulated_proto;
2383 tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
2384 tx_flags |= I40E_TX_FLAGS_SW_VLAN;
2387 if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED))
2390 /* Insert 802.1p priority into VLAN header */
2391 if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
2392 (skb->priority != TC_PRIO_CONTROL)) {
2393 tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
2394 tx_flags |= (skb->priority & 0x7) <<
2395 I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
2396 if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
2397 struct vlan_ethhdr *vhdr;
2400 rc = skb_cow_head(skb, 0);
2403 vhdr = (struct vlan_ethhdr *)skb->data;
2404 vhdr->h_vlan_TCI = htons(tx_flags >>
2405 I40E_TX_FLAGS_VLAN_SHIFT);
2407 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
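/* The two tag paths differ in who inserts the tag: a HW tag travels in
 * tx_flags and is written into the data descriptor's L2TAG1 field by
 * i40e_tx_map(), while a SW tag must be rewritten into the packet header
 * itself, which is why skb_cow_head() is needed before editing vhdr.
 */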
2417 * i40e_tso - set up the tso context descriptor
2418 * @first: pointer to first Tx buffer for xmit
2419 * @hdr_len: ptr to the size of the packet header
2420 * @cd_type_cmd_tso_mss: Quad Word 1
2422 * Returns 0 if no TSO is needed, 1 if TSO is set up, or a negative error code
2424 static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
2425 u64 *cd_type_cmd_tso_mss)
2427 struct sk_buff *skb = first->skb;
2428 u64 cd_cmd, cd_tso_len, cd_mss;
2439 u32 paylen, l4_offset;
2440 u16 gso_segs, gso_size;
2443 if (skb->ip_summed != CHECKSUM_PARTIAL)
2446 if (!skb_is_gso(skb))
2449 err = skb_cow_head(skb, 0);
2453 ip.hdr = skb_network_header(skb);
2454 l4.hdr = skb_transport_header(skb);
2456 /* initialize outer IP header fields */
2457 if (ip.v4->version == 4) {
2461 ip.v6->payload_len = 0;
2464 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
2468 SKB_GSO_UDP_TUNNEL |
2469 SKB_GSO_UDP_TUNNEL_CSUM)) {
2470 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
2471 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
2474 /* determine offset of outer transport header */
2475 l4_offset = l4.hdr - skb->data;
2477 /* remove payload length from outer checksum */
2478 paylen = skb->len - l4_offset;
2479 csum_replace_by_diff(&l4.udp->check,
2480 (__force __wsum)htonl(paylen));
2483 /* reset pointers to inner headers */
2484 ip.hdr = skb_inner_network_header(skb);
2485 l4.hdr = skb_inner_transport_header(skb);
2487 /* initialize inner IP header fields */
2488 if (ip.v4->version == 4) {
2492 ip.v6->payload_len = 0;
2496 /* determine offset of inner transport header */
2497 l4_offset = l4.hdr - skb->data;
2499 /* remove payload length from inner checksum */
2500 paylen = skb->len - l4_offset;
2501 csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
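/* For TSO the stack seeds l4.tcp->check with a pseudo-header checksum
 * that includes the full L4 length. That length differs per segment, so
 * csum_replace_by_diff() subtracts it back out, leaving only the portion
 * of the pseudo-header that is constant across segments for the hardware
 * to build on.
 */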
2503 /* compute length of segmentation header */
2504 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
2506 /* pull values out of skb_shinfo */
2507 gso_size = skb_shinfo(skb)->gso_size;
2508 gso_segs = skb_shinfo(skb)->gso_segs;
2510 /* update GSO size and bytecount with header size */
2511 first->gso_segs = gso_segs;
2512 first->bytecount += (first->gso_segs - 1) * *hdr_len;
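/* Illustrative arithmetic (hypothetical values): a 60066-byte skb with a
 * 66-byte header and gso_size 1500 gives gso_segs = 40; the header is
 * replicated in every segment on the wire, so bytecount grows by
 * (40 - 1) * 66 and the Tx byte stats match what is actually transmitted.
 */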
2514 /* find the field values */
2515 cd_cmd = I40E_TX_CTX_DESC_TSO;
2516 cd_tso_len = skb->len - *hdr_len;
2518 *cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
2519 (cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
2520 (cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
2525 * i40e_tsyn - set up the tsyn context descriptor
2526 * @tx_ring: ptr to the ring to send
2527 * @skb: ptr to the skb we're sending
2528 * @tx_flags: the collected send information
2529 * @cd_type_cmd_tso_mss: Quad Word 1
2531 * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
2533 static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
2534 u32 tx_flags, u64 *cd_type_cmd_tso_mss)
2538 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
2541 /* Tx timestamps cannot be sampled when doing TSO */
2542 if (tx_flags & I40E_TX_FLAGS_TSO)
2545 /* only timestamp the outbound packet if the user has requested it and
2546 * we are not already transmitting a packet to be timestamped
2548 pf = i40e_netdev_to_pf(tx_ring->netdev);
2549 if (!(pf->flags & I40E_FLAG_PTP))
2553 !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, &pf->state)) {
2554 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2555 pf->ptp_tx_skb = skb_get(skb);
2560 *cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
2561 I40E_TXD_CTX_QW1_CMD_SHIFT;
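/* Only one outbound packet can be timestamped at a time: the
 * __I40E_PTP_TX_IN_PROGRESS bit is taken with test_and_set_bit_lock() and
 * a reference to the skb is held in pf->ptp_tx_skb until the timestamp is
 * retrieved, so concurrent requests simply fall through untimestamped.
 */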
2567 * i40e_tx_enable_csum - Enable Tx checksum offloads
2569 * @tx_flags: pointer to Tx flags currently set
2570 * @td_cmd: Tx descriptor command bits to set
2571 * @td_offset: Tx descriptor header offsets to set
2572 * @tx_ring: Tx descriptor ring
2573 * @cd_tunneling: ptr to context desc bits
2575 static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
2576 u32 *td_cmd, u32 *td_offset,
2577 struct i40e_ring *tx_ring,
2590 unsigned char *exthdr;
2591 u32 offset, cmd = 0;
2595 if (skb->ip_summed != CHECKSUM_PARTIAL)
2598 ip.hdr = skb_network_header(skb);
2599 l4.hdr = skb_transport_header(skb);
2601 /* compute outer L2 header size */
2602 offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
2604 if (skb->encapsulation) {
2606 /* define outer network header type */
2607 if (*tx_flags & I40E_TX_FLAGS_IPV4) {
2608 tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
2609 I40E_TX_CTX_EXT_IP_IPV4 :
2610 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
2612 l4_proto = ip.v4->protocol;
2613 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
2614 tunnel |= I40E_TX_CTX_EXT_IP_IPV6;
2616 exthdr = ip.hdr + sizeof(*ip.v6);
2617 l4_proto = ip.v6->nexthdr;
2618 if (l4.hdr != exthdr)
2619 ipv6_skip_exthdr(skb, exthdr - skb->data,
2620 &l4_proto, &frag_off);
2623 /* define outer transport */
2626 tunnel |= I40E_TXD_CTX_UDP_TUNNELING;
2627 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
2630 tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
2631 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
2635 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
2636 l4.hdr = skb_inner_network_header(skb);
2639 if (*tx_flags & I40E_TX_FLAGS_TSO)
2642 skb_checksum_help(skb);
2646 /* compute outer L3 header size */
2647 tunnel |= ((l4.hdr - ip.hdr) / 4) <<
2648 I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
2650 /* switch IP header pointer from outer to inner header */
2651 ip.hdr = skb_inner_network_header(skb);
2653 /* compute tunnel header size */
2654 tunnel |= ((ip.hdr - l4.hdr) / 2) <<
2655 I40E_TXD_CTX_QW0_NATLEN_SHIFT;
2657 /* indicate if we need to offload outer UDP header */
2658 if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
2659 !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
2660 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
2661 tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
2663 /* record tunnel offload values */
2664 *cd_tunneling |= tunnel;
2666 /* switch L4 header pointer from outer to inner */
2667 l4.hdr = skb_inner_transport_header(skb);
2670 /* reset type as we transition from outer to inner headers */
2671 *tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6);
2672 if (ip.v4->version == 4)
2673 *tx_flags |= I40E_TX_FLAGS_IPV4;
2674 if (ip.v6->version == 6)
2675 *tx_flags |= I40E_TX_FLAGS_IPV6;
2678 /* Enable IP checksum offloads */
2679 if (*tx_flags & I40E_TX_FLAGS_IPV4) {
2680 l4_proto = ip.v4->protocol;
2681 /* the stack computes the IP header already, the only time we
2682 * need the hardware to recompute it is in the case of TSO.
2684 cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
2685 I40E_TX_DESC_CMD_IIPT_IPV4_CSUM :
2686 I40E_TX_DESC_CMD_IIPT_IPV4;
2687 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
2688 cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
2690 exthdr = ip.hdr + sizeof(*ip.v6);
2691 l4_proto = ip.v6->nexthdr;
2692 if (l4.hdr != exthdr)
2693 ipv6_skip_exthdr(skb, exthdr - skb->data,
2694 &l4_proto, &frag_off);
2697 /* compute inner L3 header size */
2698 offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
2700 /* Enable L4 checksum offloads */
2703 /* enable checksum offloads */
2704 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
2705 offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2708 /* enable SCTP checksum offload */
2709 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
2710 offset |= (sizeof(struct sctphdr) >> 2) <<
2711 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2714 /* enable UDP checksum offload */
2715 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
2716 offset |= (sizeof(struct udphdr) >> 2) <<
2717 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2720 if (*tx_flags & I40E_TX_FLAGS_TSO)
2722 skb_checksum_help(skb);
2727 *td_offset |= offset;
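/* td_offset packs three header lengths in hardware units rather than
 * bytes: MACLEN in 2-byte words (the divide by 2 above), IPLEN in 4-byte
 * words (the divide by 4), and L4LEN in 4-byte words (tcp->doff and the
 * sizeof() >> 2 conversions), which is why each length is shifted into
 * its field instead of being stored directly.
 */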
2733 * i40e_create_tx_ctx - Build the Tx context descriptor
2734 * @tx_ring: ring to create the descriptor on
2735 * @cd_type_cmd_tso_mss: Quad Word 1
2736 * @cd_tunneling: Quad Word 0 - bits 0-31
2737 * @cd_l2tag2: Quad Word 0 - bits 32-63
2739 static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
2740 const u64 cd_type_cmd_tso_mss,
2741 const u32 cd_tunneling, const u32 cd_l2tag2)
2743 struct i40e_tx_context_desc *context_desc;
2744 int i = tx_ring->next_to_use;
2746 if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
2747 !cd_tunneling && !cd_l2tag2)
2750 /* grab the next descriptor */
2751 context_desc = I40E_TX_CTXTDESC(tx_ring, i);
2754 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2756 /* cpu_to_le32 and assign to struct fields */
2757 context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
2758 context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
2759 context_desc->rsvd = cpu_to_le16(0);
2760 context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
2764 * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
2765 * @tx_ring: the ring to be checked
2766 * @size: the number of descriptors we want to assure is available
2768 * Returns -EBUSY if a stop is needed, else 0
2770 int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
2772 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
2773 /* Memory barrier before checking head and tail */
2776 /* Check again in a case another CPU has just made room available. */
2777 if (likely(I40E_DESC_UNUSED(tx_ring) < size))
2780 /* A reprieve! - use start_queue because it doesn't call schedule */
2781 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
2782 ++tx_ring->tx_stats.restart_queue;
2787 * __i40e_chk_linearize - Check if there are more than 8 buffers per packet
2790 * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
2791 * and so we need to figure out the cases where we need to linearize the skb.
2793 * For TSO we need to count the TSO header and segment payload separately.
2794 * As such we need to check cases where we have 7 fragments or more as we
2795 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
2796 * the segment payload in the first descriptor, and another 7 for the fragments.
2799 bool __i40e_chk_linearize(struct sk_buff *skb)
2801 const struct skb_frag_struct *frag, *stale;
2804 /* no need to check if number of frags is less than 7 */
2805 nr_frags = skb_shinfo(skb)->nr_frags;
2806 if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
2809 /* We need to walk through the list and validate that each group
2810 * of 6 fragments totals at least gso_size.
2812 nr_frags -= I40E_MAX_BUFFER_TXD - 2;
2813 frag = &skb_shinfo(skb)->frags[0];
2815 /* Initialize sum to the negative value of gso_size minus 1. We
2816 * use this as the worst case scenario in which the frag ahead
2817 * of us only provides one byte which is why we are limited to 6
2818 * descriptors for a single transmit as the header and previous
2819 * fragment are already consuming 2 descriptors.
2821 sum = 1 - skb_shinfo(skb)->gso_size;
2823 /* Add size of frags 0 through 4 to create our initial sum */
2824 sum += skb_frag_size(frag++);
2825 sum += skb_frag_size(frag++);
2826 sum += skb_frag_size(frag++);
2827 sum += skb_frag_size(frag++);
2828 sum += skb_frag_size(frag++);
2830 /* Walk through fragments adding latest fragment, testing it, and
2831 * then removing stale fragments from the sum.
2833 stale = &skb_shinfo(skb)->frags[0];
2835 sum += skb_frag_size(frag++);
2837 /* if sum is negative we failed to make sufficient progress */
2844 sum -= skb_frag_size(stale++);
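/* Worked example (hypothetical sizes): with gso_size 4000, sum starts at
 * -3999, so any six consecutive frags must total at least 4000 to keep it
 * positive. Seven 1500-byte frags give a 9000-byte window and the skb is
 * sent as-is; seven 500-byte frags only give a 3000-byte window, sum is
 * still negative at the first test, and the caller must linearize the
 * skb before mapping it.
 */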
2851 * i40e_tx_map - Build the Tx descriptor
2852 * @tx_ring: ring to send buffer on
2854 * @first: first Tx buffer info struct to use
2855 * @tx_flags: collected send information
2856 * @hdr_len: size of the packet header
2857 * @td_cmd: the command field in the descriptor
2858 * @td_offset: offset for checksum or crc
2860 static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
2861 struct i40e_tx_buffer *first, u32 tx_flags,
2862 const u8 hdr_len, u32 td_cmd, u32 td_offset)
2864 unsigned int data_len = skb->data_len;
2865 unsigned int size = skb_headlen(skb);
2866 struct skb_frag_struct *frag;
2867 struct i40e_tx_buffer *tx_bi;
2868 struct i40e_tx_desc *tx_desc;
2869 u16 i = tx_ring->next_to_use;
2874 if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
2875 td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
2876 td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
2877 I40E_TX_FLAGS_VLAN_SHIFT;
2880 first->tx_flags = tx_flags;
2882 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
2884 tx_desc = I40E_TX_DESC(tx_ring, i);
2887 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
2888 unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
2890 if (dma_mapping_error(tx_ring->dev, dma))
2893 /* record length, and DMA address */
2894 dma_unmap_len_set(tx_bi, len, size);
2895 dma_unmap_addr_set(tx_bi, dma, dma);
2897 /* align size to end of page */
2898 max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1);
2899 tx_desc->buffer_addr = cpu_to_le64(dma);
2901 while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
2902 tx_desc->cmd_type_offset_bsz =
2903 build_ctob(td_cmd, td_offset,
2910 if (i == tx_ring->count) {
2911 tx_desc = I40E_TX_DESC(tx_ring, 0);
2918 max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
2919 tx_desc->buffer_addr = cpu_to_le64(dma);
2922 if (likely(!data_len))
2925 tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
2932 if (i == tx_ring->count) {
2933 tx_desc = I40E_TX_DESC(tx_ring, 0);
2937 size = skb_frag_size(frag);
2940 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
2943 tx_bi = &tx_ring->tx_bi[i];
2946 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
2949 if (i == tx_ring->count)
2952 tx_ring->next_to_use = i;
2954 i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
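/* Stopping the queue here, before the tail write below, means a nearly
 * full ring is detected while this CPU still owns the queue;
 * i40e_clean_tx_irq() will wake the subqueue once enough descriptors have
 * been reclaimed.
 */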
2956 /* write last descriptor with EOP bit */
2957 td_cmd |= I40E_TX_DESC_CMD_EOP;
2959 /* We can OR these values together as they both are checked against
2960 * 4 below and at this point desc_count will be used as a boolean value
2961 * after this if/else block.
2963 desc_count |= ++tx_ring->packet_stride;
2965 /* Algorithm to optimize tail and RS bit setting:
2966 * if queue is stopped
2968 * reset packet counter
2969 * else if xmit_more is supported and is true
2970 * advance packet counter to 4
2971 * reset desc_count to 0
2973 * if desc_count >= 4
2975 * reset packet counter
2979 * Note: If there are less than 4 descriptors
2980 * pending and interrupts were disabled the service task will
2981 * trigger a force WB.
2983 if (netif_xmit_stopped(txring_txq(tx_ring))) {
2985 } else if (skb->xmit_more) {
2986 /* set stride to arm on next packet and reset desc_count */
2987 tx_ring->packet_stride = WB_STRIDE;
2989 } else if (desc_count >= WB_STRIDE) {
2991 /* write last descriptor with RS bit set */
2992 td_cmd |= I40E_TX_DESC_CMD_RS;
2993 tx_ring->packet_stride = 0;
2996 tx_desc->cmd_type_offset_bsz =
2997 build_ctob(td_cmd, td_offset, size, td_tag);
2999 /* Force memory writes to complete before letting h/w know there
3000 * are new descriptors to fetch.
3002 * We also use this memory barrier to make certain all of the
3003 * status bits have been updated before next_to_watch is written.
3007 /* set next_to_watch value indicating a packet is present */
3008 first->next_to_watch = tx_desc;
3010 /* notify HW of packet */
3012 writel(i, tx_ring->tail);
3014 /* we need this if more than one processor can write to our tail
3015 * at a time; it synchronizes IO on IA64/Altix systems
3023 dev_info(tx_ring->dev, "TX DMA map failed\n");
3025 /* clear dma mappings for failed tx_bi map */
3027 tx_bi = &tx_ring->tx_bi[i];
3028 i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
3036 tx_ring->next_to_use = i;
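/* Error unwind: walk backward from the failed mapping, releasing each DMA
 * mapping already programmed for this skb with
 * i40e_unmap_and_free_tx_resource() until the first buffer is reached,
 * then restore next_to_use so the descriptors are reused by the next
 * transmit attempt.
 */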
3040 * i40e_xmit_frame_ring - Sends buffer on Tx ring
3042 * @tx_ring: ring to send buffer on
3044 * Returns NETDEV_TX_OK if sent, else an error code
3046 static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
3047 struct i40e_ring *tx_ring)
3049 u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
3050 u32 cd_tunneling = 0, cd_l2tag2 = 0;
3051 struct i40e_tx_buffer *first;
3060 /* prefetch the data; we'll need it later */
3061 prefetch(skb->data);
3063 count = i40e_xmit_descriptor_count(skb);
3064 if (i40e_chk_linearize(skb, count)) {
3065 if (__skb_linearize(skb)) {
3066 dev_kfree_skb_any(skb);
3067 return NETDEV_TX_OK;
3069 count = i40e_txd_use_count(skb->len);
3070 tx_ring->tx_stats.tx_linearize++;
3073 /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
3074 * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
3075 * + 4 desc gap to avoid the cache line where head is,
3076 * + 1 desc for context descriptor,
3077 * otherwise try next time
3079 if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
3080 tx_ring->tx_stats.tx_busy++;
3081 return NETDEV_TX_BUSY;
3084 /* record the location of the first descriptor for this packet */
3085 first = &tx_ring->tx_bi[tx_ring->next_to_use];
3087 first->bytecount = skb->len;
3088 first->gso_segs = 1;
3090 /* prepare the xmit flags */
3091 if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
3094 /* obtain protocol of skb */
3095 protocol = vlan_get_protocol(skb);
3097 /* setup IPv4/IPv6 offloads */
3098 if (protocol == htons(ETH_P_IP))
3099 tx_flags |= I40E_TX_FLAGS_IPV4;
3100 else if (protocol == htons(ETH_P_IPV6))
3101 tx_flags |= I40E_TX_FLAGS_IPV6;
3103 tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss);
3108 tx_flags |= I40E_TX_FLAGS_TSO;
3110 /* Always offload the checksum, since it's in the data descriptor */
3111 tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
3112 tx_ring, &cd_tunneling);
3116 tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);
3119 tx_flags |= I40E_TX_FLAGS_TSYN;
3121 skb_tx_timestamp(skb);
3123 /* always enable CRC insertion offload */
3124 td_cmd |= I40E_TX_DESC_CMD_ICRC;
3126 i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
3127 cd_tunneling, cd_l2tag2);
3129 /* Add Flow Director ATR if it's enabled.
3131 * NOTE: this must always be directly before the data descriptor.
3133 i40e_atr(tx_ring, skb, tx_flags);
3135 i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
3138 return NETDEV_TX_OK;
3141 dev_kfree_skb_any(first->skb);
3143 return NETDEV_TX_OK;
3147 * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
3149 * @netdev: network interface device structure
3151 * Returns NETDEV_TX_OK if sent, else an error code
3153 netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3155 struct i40e_netdev_priv *np = netdev_priv(netdev);
3156 struct i40e_vsi *vsi = np->vsi;
3157 struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
3159 /* hardware can't handle really short frames; hardware padding works beyond this point */
3162 if (skb_put_padto(skb, I40E_MIN_TX_LEN))
3163 return NETDEV_TX_OK;
3165 return i40e_xmit_frame_ring(skb, tx_ring);