/* QLogic qede NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and /or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <net/udp_tunnel.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/ip6_checksum.h>
#include <linux/qed/qed_if.h>

/*********************************
 * Content also used by slowpath *
 *********************************/
int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
	struct sw_rx_data *sw_rx_data;
	struct eth_rx_bd *rx_bd;

	data = alloc_pages(GFP_ATOMIC, 0);

	/* Map the entire page as it would be used
	 * for multiple RX buffer segment size mapping.
	 */
	mapping = dma_map_page(rxq->dev, data, 0,
			       PAGE_SIZE, rxq->data_direction);
	if (unlikely(dma_mapping_error(rxq->dev, mapping))) {

	sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
	sw_rx_data->page_offset = 0;
	sw_rx_data->data = data;
	sw_rx_data->mapping = mapping;

	/* Advance PROD and get BD pointer */
	rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring);

	rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping));
	rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping));
/* Unmap the data and free skb */
int qede_free_tx_pkt(struct qede_dev *edev, struct qede_tx_queue *txq, int *len)
	u16 idx = txq->sw_tx_cons & NUM_TX_BDS_MAX;
	struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;
	struct eth_tx_1st_bd *first_bd;
	struct eth_tx_bd *tx_data_bd;
	bool data_split = txq->sw_tx_ring.skbs[idx].flags & QEDE_TSO_SPLIT_BD;
	int i, split_bd_len = 0;

			  "skb is null for txq idx=%d txq->sw_tx_cons=%d txq->sw_tx_prod=%d\n",
			  idx, txq->sw_tx_cons, txq->sw_tx_prod);

	first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);

	nbds = first_bd->data.nbds;
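	/* If the headers were placed in a separate (split) BD, consume it here
	 * and fold its length into the first BD's unmap length, so the single
	 * dma_unmap_single() below covers both.
	 */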
		struct eth_tx_bd *split = (struct eth_tx_bd *)
			qed_chain_consume(&txq->tx_pbl);
		split_bd_len = BD_UNMAP_LEN(split);

	dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
			 BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);

	/* Unmap the data of the skb frags */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) {
		tx_data_bd = (struct eth_tx_bd *)
			qed_chain_consume(&txq->tx_pbl);
		dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);

	while (bds_consumed++ < nbds)
		qed_chain_consume(&txq->tx_pbl);

	dev_kfree_skb_any(skb);
	txq->sw_tx_ring.skbs[idx].skb = NULL;
	txq->sw_tx_ring.skbs[idx].flags = 0;
/* Unmap the data and free skb when mapping failed during start_xmit */
static void qede_free_failed_tx_pkt(struct qede_tx_queue *txq,
				    struct eth_tx_1st_bd *first_bd,
				    int nbd, bool data_split)
	u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
	struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;
	struct eth_tx_bd *tx_data_bd;
	int i, split_bd_len = 0;

	/* Return prod to its position before this skb was handled */
	qed_chain_set_prod(&txq->tx_pbl,
			   le16_to_cpu(txq->tx_db.data.bd_prod), first_bd);

	first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);

		struct eth_tx_bd *split = (struct eth_tx_bd *)
			qed_chain_produce(&txq->tx_pbl);
		split_bd_len = BD_UNMAP_LEN(split);

	dma_unmap_single(txq->dev, BD_UNMAP_ADDR(first_bd),
			 BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);

	/* Unmap the data of the skb frags */
	for (i = 0; i < nbd; i++) {
		tx_data_bd = (struct eth_tx_bd *)
			qed_chain_produce(&txq->tx_pbl);
		if (tx_data_bd->nbytes)
			dma_unmap_page(txq->dev,
				       BD_UNMAP_ADDR(tx_data_bd),
				       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);

	/* Return again prod to its position before this skb was handled */
	qed_chain_set_prod(&txq->tx_pbl,
			   le16_to_cpu(txq->tx_db.data.bd_prod), first_bd);

	dev_kfree_skb_any(skb);
	txq->sw_tx_ring.skbs[idx].skb = NULL;
	txq->sw_tx_ring.skbs[idx].flags = 0;
static u32 qede_xmit_type(struct sk_buff *skb, int *ipv6_ext)
	u32 rc = XMIT_L4_CSUM;

	if (skb->ip_summed != CHECKSUM_PARTIAL)

	l3_proto = vlan_get_protocol(skb);
	if (l3_proto == htons(ETH_P_IPV6) &&
	    (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))

	if (skb->encapsulation) {
		if (skb_is_gso(skb)) {
			unsigned short gso_type = skb_shinfo(skb)->gso_type;

			if ((gso_type & SKB_GSO_UDP_TUNNEL_CSUM) ||
			    (gso_type & SKB_GSO_GRE_CSUM))
				rc |= XMIT_ENC_GSO_L4_CSUM;
static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
					 struct eth_tx_2nd_bd *second_bd,
					 struct eth_tx_3rd_bd *third_bd)
	u16 bd2_bits1 = 0, bd2_bits2 = 0;

	bd2_bits1 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT);

	bd2_bits2 |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) &
		      ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
		     << ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;

	bd2_bits1 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
		      ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT);

	if (vlan_get_protocol(skb) == htons(ETH_P_IPV6))
		l4_proto = ipv6_hdr(skb)->nexthdr;
		l4_proto = ip_hdr(skb)->protocol;

	if (l4_proto == IPPROTO_UDP)
		bd2_bits1 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;

	third_bd->data.bitfields |=
		cpu_to_le16(((tcp_hdrlen(skb) / 4) &
			     ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) <<
			    ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT);

	second_bd->data.bitfields1 = cpu_to_le16(bd2_bits1);
	second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2);
static int map_frag_to_bd(struct qede_tx_queue *txq,
			  skb_frag_t *frag, struct eth_tx_bd *bd)
	/* Map skb non-linear frag data for DMA */
	mapping = skb_frag_dma_map(txq->dev, frag, 0,
				   skb_frag_size(frag), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(txq->dev, mapping)))

	/* Setup the data pointer of the frag data */
	BD_SET_UNMAP_ADDR_LEN(bd, mapping, skb_frag_size(frag));
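/* Total header length in bytes (start of packet up to and including the TCP
 * header); for encapsulated packets the inner headers are used.
 */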
static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt)
		return (skb_inner_transport_header(skb) +
			inner_tcp_hdrlen(skb) - skb->data);

	return (skb_transport_header(skb) +
		tcp_hdrlen(skb) - skb->data);

/* +2 for 1st BD for headers and 2nd BD for headlen (if required) */
#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
static bool qede_pkt_req_lin(struct sk_buff *skb, u8 xmit_type)
	int allowed_frags = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1;

	if (xmit_type & XMIT_LSO) {
		hlen = qede_get_skb_hlen(skb, xmit_type & XMIT_ENC);

		/* linear payload would require its own BD */
		if (skb_headlen(skb) > hlen)

	return (skb_shinfo(skb)->nr_frags > allowed_frags);
static inline void qede_update_tx_producer(struct qede_tx_queue *txq)
	/* wmb makes sure that the BDs data is updated before updating the
	 * producer, otherwise FW may read old data from the BDs.
	 */
	writel(txq->tx_db.raw, txq->doorbell_addr);

	/* mmiowb is needed to synchronize doorbell writes from more than one
	 * processor. It guarantees that the write arrives to the device before
	 * the queue lock is released and another start_xmit is called (possibly
	 * on another CPU). Without this barrier, the next doorbell can bypass
	 * this doorbell. This is applicable to IA64/Altix systems.
	 */
static int qede_xdp_xmit(struct qede_dev *edev, struct qede_fastpath *fp,
			 struct sw_rx_data *metadata, u16 padding, u16 length)
	struct qede_tx_queue *txq = fp->xdp_tx;
	u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
	struct eth_tx_1st_bd *first_bd;

	if (!qed_chain_get_elem_left(&txq->tx_pbl)) {

	first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);

	memset(first_bd, 0, sizeof(*first_bd));
	first_bd->data.bd_flags.bitfields =
		BIT(ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT);
	first_bd->data.bitfields |=
		(length & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
		ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
	first_bd->data.nbds = 1;

	/* We can safely ignore the offset, as it's 0 for XDP */
	BD_SET_UNMAP_ADDR_LEN(first_bd, metadata->mapping + padding, length);

	/* Synchronize the buffer back to device, as program [probably]
	 * changed it.
	 */
	dma_sync_single_for_device(&edev->pdev->dev,
				   metadata->mapping + padding,
				   length, PCI_DMA_TODEVICE);

	txq->sw_tx_ring.pages[idx] = metadata->data;

	/* Mark the fastpath for future XDP doorbell */
int qede_txq_has_work(struct qede_tx_queue *txq)
	/* Tell compiler that consumer and producer can change */
	hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
	if (qed_chain_get_cons_idx(&txq->tx_pbl) == hw_bd_cons + 1)

	return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl);

static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
	struct eth_tx_1st_bd *bd;

	hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);

	while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
		bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);

		dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(bd),
				 PAGE_SIZE, DMA_BIDIRECTIONAL);
		__free_page(txq->sw_tx_ring.pages[txq->sw_tx_cons &
static int qede_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
	struct netdev_queue *netdev_txq;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	netdev_txq = netdev_get_tx_queue(edev->ndev, txq->index);

	hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);

	while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
		rc = qede_free_tx_pkt(edev, txq, &len);
			DP_NOTICE(edev, "hw_bd_cons = %d, chain_cons=%d\n",
				  qed_chain_get_cons_idx(&txq->tx_pbl));

	netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */

	if (unlikely(netif_tx_queue_stopped(netdev_txq))) {
		/* Taking tx_lock is needed to prevent re-enabling the queue
		 * while it's empty. This could have happened if rx_action() gets
		 * suspended in qede_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (qede_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */
		__netif_tx_lock(netdev_txq, smp_processor_id());

		if ((netif_tx_queue_stopped(netdev_txq)) &&
		    (edev->state == QEDE_STATE_OPEN) &&
		    (qed_chain_get_elem_left(&txq->tx_pbl)
		      >= (MAX_SKB_FRAGS + 1))) {
			netif_tx_wake_queue(netdev_txq);
			DP_VERBOSE(edev, NETIF_MSG_TX_DONE,
				   "Wake queue was called\n");

		__netif_tx_unlock(netdev_txq);
bool qede_has_rx_work(struct qede_rx_queue *rxq)
	u16 hw_comp_cons, sw_comp_cons;

	/* Tell compiler that status block fields can change */

	hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
	sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);

	return hw_comp_cons != sw_comp_cons;

static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
	qed_chain_consume(&rxq->rx_bd_ring);

/* This function reuses the buffer(from an offset) from
 * consumer index to producer index in the bd ring
 */
static inline void qede_reuse_page(struct qede_rx_queue *rxq,
				   struct sw_rx_data *curr_cons)
	struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
	struct sw_rx_data *curr_prod;
	dma_addr_t new_mapping;

	curr_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
	*curr_prod = *curr_cons;

	new_mapping = curr_prod->mapping + curr_prod->page_offset;

	rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(new_mapping));
	rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping));

	curr_cons->data = NULL;
/* In case of allocation failures reuse buffers
 * from consumer index to produce buffers for firmware
 */
void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count)
	struct sw_rx_data *curr_cons;

	for (; count > 0; count--) {
		curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
		qede_reuse_page(rxq, curr_cons);
		qede_rx_bd_ring_consume(rxq);

static inline int qede_realloc_rx_buffer(struct qede_rx_queue *rxq,
					 struct sw_rx_data *curr_cons)
	/* Move to the next segment in the page */
	curr_cons->page_offset += rxq->rx_buf_seg_size;

	if (curr_cons->page_offset == PAGE_SIZE) {
		if (unlikely(qede_alloc_rx_buffer(rxq))) {
			/* Since we failed to allocate new buffer
			 * current buffer can be used again.
			 */
			curr_cons->page_offset -= rxq->rx_buf_seg_size;

		dma_unmap_page(rxq->dev, curr_cons->mapping,
			       PAGE_SIZE, rxq->data_direction);

		/* Increment refcount of the page as we don't want
		 * network stack to take the ownership of the page
		 * which can be recycled multiple times by the driver.
		 */
		page_ref_inc(curr_cons->data);
		qede_reuse_page(rxq, curr_cons);
void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)
	u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring);
	u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring);
	struct eth_rx_prod_data rx_prods = {0};

	/* Update producers */
	rx_prods.bd_prod = cpu_to_le16(bd_prod);
	rx_prods.cqe_prod = cpu_to_le16(cqe_prod);

	/* Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 */
	internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),

	/* mmiowb is needed to synchronize doorbell writes from more than one
	 * processor. It guarantees that the write arrives to the device before
	 * the napi lock is released and another qede_poll is called (possibly
	 * on another CPU). Without this barrier, the next doorbell can bypass
	 * this doorbell. This is applicable to IA64/Altix systems.
	 */
static void qede_get_rxhash(struct sk_buff *skb, u8 bitfields, __le32 rss_hash)
	enum pkt_hash_types hash_type = PKT_HASH_TYPE_NONE;
	enum rss_hash_type htype;

	htype = GET_FIELD(bitfields, ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
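	/* Pure IPv4/IPv6 hash types carry only L3 information; any other
	 * non-zero type also hashed the L4 ports.
	 */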
		hash_type = ((htype == RSS_HASH_TYPE_IPV4) ||
			     (htype == RSS_HASH_TYPE_IPV6)) ?
			    PKT_HASH_TYPE_L3 : PKT_HASH_TYPE_L4;
		hash = le32_to_cpu(rss_hash);

	skb_set_hash(skb, hash, hash_type);

static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
	skb_checksum_none_assert(skb);

	if (csum_flag & QEDE_CSUM_UNNECESSARY)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (csum_flag & QEDE_TUNN_CSUM_UNNECESSARY)

static inline void qede_skb_receive(struct qede_dev *edev,
				    struct qede_fastpath *fp,
				    struct qede_rx_queue *rxq,
				    struct sk_buff *skb, u16 vlan_tag)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);

	napi_gro_receive(&fp->napi, skb);

static void qede_set_gro_params(struct qede_dev *edev,
				struct eth_fast_path_rx_tpa_start_cqe *cqe)
	u16 parsing_flags = le16_to_cpu(cqe->pars_flags.flags);

	if (((parsing_flags >> PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) &
	     PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == 2)
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	skb_shinfo(skb)->gso_size = __le16_to_cpu(cqe->len_on_first_bd) -
static int qede_fill_frag_skb(struct qede_dev *edev,
			      struct qede_rx_queue *rxq,
			      u8 tpa_agg_index, u16 len_on_bd)
	struct sw_rx_data *current_bd = &rxq->sw_rx_ring[rxq->sw_rx_cons &
	struct qede_agg_info *tpa_info = &rxq->tpa_info[tpa_agg_index];
	struct sk_buff *skb = tpa_info->skb;

	if (unlikely(tpa_info->state != QEDE_AGG_STATE_START))

	/* Add one frag and update the appropriate fields in the skb */
	skb_fill_page_desc(skb, tpa_info->frag_id++,
			   current_bd->data, current_bd->page_offset,

	if (unlikely(qede_realloc_rx_buffer(rxq, current_bd))) {
		/* Incr page ref count to reuse on allocation failure
		 * so that it doesn't get freed while freeing SKB.
		 */
		page_ref_inc(current_bd->data);

	qed_chain_consume(&rxq->rx_bd_ring);

	skb->data_len += len_on_bd;
	skb->truesize += rxq->rx_buf_seg_size;
	skb->len += len_on_bd;

	tpa_info->state = QEDE_AGG_STATE_ERROR;
	qede_recycle_rx_bd_ring(rxq, 1);
static bool qede_tunn_exist(u16 flag)
	return !!(flag & (PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
			  PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT));

static u8 qede_check_tunn_csum(u16 flag)
	if (flag & (PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
		    PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT))
		csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
			     PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT;

	if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
		    PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
		csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
			     PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
		tcsum = QEDE_TUNN_CSUM_UNNECESSARY;

	csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
		     PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT |
		     PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
		     PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
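	/* If any error bit accumulated above is also set in the parse flags,
	 * the packet's checksum cannot be trusted.
	 */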
	if (csum_flag & flag)
		return QEDE_CSUM_ERROR;

	return QEDE_CSUM_UNNECESSARY | tcsum;

static void qede_tpa_start(struct qede_dev *edev,
			   struct qede_rx_queue *rxq,
			   struct eth_fast_path_rx_tpa_start_cqe *cqe)
	struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
	struct eth_rx_bd *rx_bd_cons = qed_chain_consume(&rxq->rx_bd_ring);
	struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
	struct sw_rx_data *replace_buf = &tpa_info->buffer;
	dma_addr_t mapping = tpa_info->buffer_mapping;
	struct sw_rx_data *sw_rx_data_cons;
	struct sw_rx_data *sw_rx_data_prod;

	sw_rx_data_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
	sw_rx_data_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];

	/* Use pre-allocated replacement buffer - we can't release the agg.
	 * start until it's over and we don't want to risk allocation failing
	 * here, so re-allocate when aggregation will be over.
	 */
	sw_rx_data_prod->mapping = replace_buf->mapping;

	sw_rx_data_prod->data = replace_buf->data;
	rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(mapping));
	rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(mapping));
	sw_rx_data_prod->page_offset = replace_buf->page_offset;

	/* move partial skb from cons to pool (don't unmap yet)
	 * save mapping, in case we drop the packet later on.
	 */
	tpa_info->buffer = *sw_rx_data_cons;
	mapping = HILO_U64(le32_to_cpu(rx_bd_cons->addr.hi),
			   le32_to_cpu(rx_bd_cons->addr.lo));

	tpa_info->buffer_mapping = mapping;

	/* set tpa state to start only if we are able to allocate skb
	 * for this aggregation, otherwise mark as error and aggregation will
	 * be dropped.
	 */
	tpa_info->skb = netdev_alloc_skb(edev->ndev,
					 le16_to_cpu(cqe->len_on_first_bd));
	if (unlikely(!tpa_info->skb)) {
		DP_NOTICE(edev, "Failed to allocate SKB for gro\n");
		tpa_info->state = QEDE_AGG_STATE_ERROR;

	/* Start filling in the aggregation info */
	skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd));
	tpa_info->frag_id = 0;
	tpa_info->state = QEDE_AGG_STATE_START;

	/* Store some information from first CQE */
	tpa_info->start_cqe_placement_offset = cqe->placement_offset;
	tpa_info->start_cqe_bd_len = le16_to_cpu(cqe->len_on_first_bd);
	if ((le16_to_cpu(cqe->pars_flags.flags) >>
	     PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT) &
	    PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK)
		tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
		tpa_info->vlan_tag = 0;

	qede_get_rxhash(tpa_info->skb, cqe->bitfields, cqe->rss_hash);

	/* This is needed in order to enable forwarding support */
	qede_set_gro_params(edev, tpa_info->skb, cqe);

cons_buf: /* We still need to handle bd_len_list to consume buffers */
	if (likely(cqe->ext_bd_len_list[0]))
		qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
				   le16_to_cpu(cqe->ext_bd_len_list[0]));

	if (unlikely(cqe->ext_bd_len_list[1])) {
		       "Unlikely - got a TPA aggregation with more than one ext_bd_len_list entry in the TPA start\n");
		tpa_info->state = QEDE_AGG_STATE_ERROR;
static void qede_gro_ip_csum(struct sk_buff *skb)
	const struct iphdr *iph = ip_hdr(skb);

	skb_set_transport_header(skb, sizeof(struct iphdr));
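	/* Refresh the TCP pseudo-header checksum over the aggregated length
	 * before handing the super-frame to tcp_gro_complete().
	 */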
	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);

	tcp_gro_complete(skb);

static void qede_gro_ipv6_csum(struct sk_buff *skb)
	struct ipv6hdr *iph = ipv6_hdr(skb);

	skb_set_transport_header(skb, sizeof(struct ipv6hdr));

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
	tcp_gro_complete(skb);

static void qede_gro_receive(struct qede_dev *edev,
			     struct qede_fastpath *fp,
	/* FW can send a single MTU sized packet from gro flow
	 * due to aggregation timeout/last segment etc. which
	 * is not expected to be a gro packet. If a skb has zero
	 * frags then simply push it in the stack as non gso skb.
	 */
	if (unlikely(!skb->data_len)) {
		skb_shinfo(skb)->gso_type = 0;
		skb_shinfo(skb)->gso_size = 0;

	if (skb_shinfo(skb)->gso_size) {
		skb_reset_network_header(skb);

		switch (skb->protocol) {
		case htons(ETH_P_IP):
			qede_gro_ip_csum(skb);
		case htons(ETH_P_IPV6):
			qede_gro_ipv6_csum(skb);
			       "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
			       ntohs(skb->protocol));

	skb_record_rx_queue(skb, fp->rxq->rxq_id);
	qede_skb_receive(edev, fp, fp->rxq, skb, vlan_tag);

static inline void qede_tpa_cont(struct qede_dev *edev,
				 struct qede_rx_queue *rxq,
				 struct eth_fast_path_rx_tpa_cont_cqe *cqe)
	for (i = 0; cqe->len_list[i]; i++)
		qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
				   le16_to_cpu(cqe->len_list[i]));

		   "Strange - TPA cont with more than a single len_list entry\n");
static void qede_tpa_end(struct qede_dev *edev,
			 struct qede_fastpath *fp,
			 struct eth_fast_path_rx_tpa_end_cqe *cqe)
	struct qede_rx_queue *rxq = fp->rxq;
	struct qede_agg_info *tpa_info;

	tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];

	for (i = 0; cqe->len_list[i]; i++)
		qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
				   le16_to_cpu(cqe->len_list[i]));

		   "Strange - TPA end with more than a single len_list entry\n");

	if (unlikely(tpa_info->state != QEDE_AGG_STATE_START))

	if (unlikely(cqe->num_of_bds != tpa_info->frag_id + 1))
			   "Strange - TPA had %02x BDs, but SKB has only %d frags\n",
			   cqe->num_of_bds, tpa_info->frag_id);
	if (unlikely(skb->len != le16_to_cpu(cqe->total_packet_len)))
			   "Strange - total packet len [cqe] is %4x but SKB has len %04x\n",
			   le16_to_cpu(cqe->total_packet_len), skb->len);

	       page_address(tpa_info->buffer.data) +
	       tpa_info->start_cqe_placement_offset +
	       tpa_info->buffer.page_offset, tpa_info->start_cqe_bd_len);

	/* Finalize the SKB */
	skb->protocol = eth_type_trans(skb, edev->ndev);
	skb->ip_summed = CHECKSUM_UNNECESSARY;

	/* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
	 * to skb_shinfo(skb)->gso_segs
	 */
	NAPI_GRO_CB(skb)->count = le16_to_cpu(cqe->num_of_coalesced_segs);

	qede_gro_receive(edev, fp, skb, tpa_info->vlan_tag);

	tpa_info->state = QEDE_AGG_STATE_NONE;

	tpa_info->state = QEDE_AGG_STATE_NONE;
	dev_kfree_skb_any(tpa_info->skb);
	tpa_info->skb = NULL;
static u8 qede_check_notunn_csum(u16 flag)
	if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
		    PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
		csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
			     PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
		csum = QEDE_CSUM_UNNECESSARY;

	csum_flag |= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
		     PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;

	if (csum_flag & flag)
		return QEDE_CSUM_ERROR;

static u8 qede_check_csum(u16 flag)
	if (!qede_tunn_exist(flag))
		return qede_check_notunn_csum(flag);

	return qede_check_tunn_csum(flag);

static bool qede_pkt_is_ip_fragmented(struct eth_fast_path_rx_reg_cqe *cqe,
	u8 tun_pars_flg = cqe->tunnel_pars_flags.flags;

	if ((tun_pars_flg & (ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK <<
			     ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT)) ||
	    (flag & (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
		     PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT)))
/* Return true iff packet is to be passed to stack */
static bool qede_rx_xdp(struct qede_dev *edev,
			struct qede_fastpath *fp,
			struct qede_rx_queue *rxq,
			struct bpf_prog *prog,
			struct sw_rx_data *bd,
			struct eth_fast_path_rx_reg_cqe *cqe)
	u16 len = le16_to_cpu(cqe->len_on_first_bd);

	xdp.data = page_address(bd->data) + cqe->placement_offset;
	xdp.data_end = xdp.data + len;

	/* Queues always have a full reset currently, so for the time
	 * being until there's atomic program replace just mark read
	 * side for map helpers.
	 */
	act = bpf_prog_run_xdp(prog, &xdp);
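	/* Only an XDP_PASS verdict lets the frame continue to the regular Rx
	 * path; XDP_TX frames are re-posted to the XDP Tx ring and anything
	 * else is dropped and its BD recycled below.
	 */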
		/* Count number of packets not to be passed to stack */

		/* We need the replacement buffer before transmit. */
		if (qede_alloc_rx_buffer(rxq)) {
			qede_recycle_rx_bd_ring(rxq, 1);

		/* Now if there's a transmission problem, we'd still have to
		 * throw current buffer, as replacement was already allocated.
		 */
		if (qede_xdp_xmit(edev, fp, bd, cqe->placement_offset, len)) {
			dma_unmap_page(rxq->dev, bd->mapping,
				       PAGE_SIZE, DMA_BIDIRECTIONAL);
			__free_page(bd->data);

		/* Regardless, we've consumed an Rx BD */
		qede_rx_bd_ring_consume(rxq);

		bpf_warn_invalid_xdp_action(act);

		qede_recycle_rx_bd_ring(rxq, cqe->bd_num);
static struct sk_buff *qede_rx_allocate_skb(struct qede_dev *edev,
					    struct qede_rx_queue *rxq,
					    struct sw_rx_data *bd, u16 len,
	unsigned int offset = bd->page_offset;
	struct skb_frag_struct *frag;
	struct page *page = bd->data;
	unsigned int pull_len;
	struct sk_buff *skb;

	/* Allocate a new SKB with a sufficiently large header len */
	skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);

	/* Copy data into SKB - if it's small, we can simply copy it and
	 * re-use the already allocated & mapped memory.
	 */
	if (len + pad <= edev->rx_copybreak) {
		memcpy(skb_put(skb, len),
		       page_address(page) + pad + offset, len);
		qede_reuse_page(rxq, bd);

	frag = &skb_shinfo(skb)->frags[0];

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
			page, pad + offset, len, rxq->rx_buf_seg_size);

	va = skb_frag_address(frag);
	pull_len = eth_get_headlen(va, QEDE_RX_HDR_SIZE);

	/* Align the pull_len to optimize memcpy */
	memcpy(skb->data, va, ALIGN(pull_len, sizeof(long)));

	/* Correct the skb & frag sizes offset after the pull */
	skb_frag_size_sub(frag, pull_len);
	frag->page_offset += pull_len;
	skb->data_len -= pull_len;
	skb->tail += pull_len;

	if (unlikely(qede_realloc_rx_buffer(rxq, bd))) {
		/* Incr page ref count to reuse on allocation failure so
		 * that it doesn't get freed while freeing SKB [as it's
		 * already mapped there].
		 */
		dev_kfree_skb_any(skb);

	/* We've consumed the first BD and prepared an SKB */
	qede_rx_bd_ring_consume(rxq);
static int qede_rx_build_jumbo(struct qede_dev *edev,
			       struct qede_rx_queue *rxq,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_reg_cqe *cqe,
	u16 pkt_len = le16_to_cpu(cqe->pkt_len);
	struct sw_rx_data *bd;

	pkt_len -= first_bd_len;

	/* We've already used one BD for the SKB. Now take care of the rest */
	for (num_frags = cqe->bd_num - 1; num_frags > 0; num_frags--) {
		u16 cur_size = pkt_len > rxq->rx_buf_size ? rxq->rx_buf_size :

		if (unlikely(!cur_size)) {
				   "Still got %d BDs for mapping jumbo, but length became 0\n",

		/* We need a replacement buffer for each BD */
		if (unlikely(qede_alloc_rx_buffer(rxq)))

		/* Now that we've allocated the replacement buffer,
		 * we can safely consume the next BD and map it to the SKB.
		 */
		bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
		bd = &rxq->sw_rx_ring[bd_cons_idx];
		qede_rx_bd_ring_consume(rxq);

		dma_unmap_page(rxq->dev, bd->mapping,
			       PAGE_SIZE, DMA_FROM_DEVICE);

		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
				   bd->data, 0, cur_size);

		skb->truesize += PAGE_SIZE;
		skb->data_len += cur_size;
		skb->len += cur_size;
		pkt_len -= cur_size;

	if (unlikely(pkt_len))
			  "Mapped all BDs of jumbo, but still have %d bytes\n",
static int qede_rx_process_tpa_cqe(struct qede_dev *edev,
				   struct qede_fastpath *fp,
				   struct qede_rx_queue *rxq,
				   union eth_rx_cqe *cqe,
				   enum eth_rx_cqe_type type)
	case ETH_RX_CQE_TYPE_TPA_START:
		qede_tpa_start(edev, rxq, &cqe->fast_path_tpa_start);
	case ETH_RX_CQE_TYPE_TPA_CONT:
		qede_tpa_cont(edev, rxq, &cqe->fast_path_tpa_cont);
	case ETH_RX_CQE_TYPE_TPA_END:
		qede_tpa_end(edev, fp, &cqe->fast_path_tpa_end);
static int qede_rx_process_cqe(struct qede_dev *edev,
			       struct qede_fastpath *fp,
			       struct qede_rx_queue *rxq)
	struct bpf_prog *xdp_prog = READ_ONCE(rxq->xdp_prog);
	struct eth_fast_path_rx_reg_cqe *fp_cqe;
	u16 len, pad, bd_cons_idx, parse_flag;
	enum eth_rx_cqe_type cqe_type;
	union eth_rx_cqe *cqe;
	struct sw_rx_data *bd;
	struct sk_buff *skb;

	/* Get the CQE from the completion ring */
	cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
	cqe_type = cqe->fast_path_regular.type;

	/* Process an unlikely slowpath event */
	if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
		struct eth_slow_path_rx_cqe *sp_cqe;

		sp_cqe = (struct eth_slow_path_rx_cqe *)cqe;
		edev->ops->eth_cqe_completion(edev->cdev, fp->id, sp_cqe);

	/* Handle TPA cqes */
	if (cqe_type != ETH_RX_CQE_TYPE_REGULAR)
		return qede_rx_process_tpa_cqe(edev, fp, rxq, cqe, cqe_type);

	/* Get the data from the SW ring; Consume it only after it's evident
	 * we wouldn't recycle it.
	 */
	bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
	bd = &rxq->sw_rx_ring[bd_cons_idx];

	fp_cqe = &cqe->fast_path_regular;
	len = le16_to_cpu(fp_cqe->len_on_first_bd);
	pad = fp_cqe->placement_offset;

	/* Run eBPF program if one is attached */
		if (!qede_rx_xdp(edev, fp, rxq, xdp_prog, bd, fp_cqe))

	/* If this is an error packet then drop it */
	flags = cqe->fast_path_regular.pars_flags.flags;
	parse_flag = le16_to_cpu(flags);

	csum_flag = qede_check_csum(parse_flag);
	if (unlikely(csum_flag == QEDE_CSUM_ERROR)) {
		if (qede_pkt_is_ip_fragmented(fp_cqe, parse_flag)) {
				  "CQE has error, flags = %x, dropping incoming packet\n",
			rxq->rx_hw_errors++;
			qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num);

	/* Basic validation passed; Need to prepare an SKB. This would also
	 * guarantee to finally consume the first BD upon success.
	 */
	skb = qede_rx_allocate_skb(edev, rxq, bd, len, pad);
		rxq->rx_alloc_errors++;
		qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num);

	/* In case of Jumbo packet, several PAGE_SIZEd buffers will be pointed
	if (fp_cqe->bd_num > 1) {
		u16 unmapped_frags = qede_rx_build_jumbo(edev, rxq, skb,

		if (unlikely(unmapped_frags > 0)) {
			qede_recycle_rx_bd_ring(rxq, unmapped_frags);
			dev_kfree_skb_any(skb);

	/* The SKB contains all the data. Now prepare meta-magic */
	skb->protocol = eth_type_trans(skb, edev->ndev);
	qede_get_rxhash(skb, fp_cqe->bitfields, fp_cqe->rss_hash);
	qede_set_skb_csum(skb, csum_flag);
	skb_record_rx_queue(skb, rxq->rxq_id);

	/* SKB is prepared - pass it to stack */
	qede_skb_receive(edev, fp, rxq, skb, le16_to_cpu(fp_cqe->vlan_tag));
static int qede_rx_int(struct qede_fastpath *fp, int budget)
	struct qede_rx_queue *rxq = fp->rxq;
	struct qede_dev *edev = fp->edev;
	u16 hw_comp_cons, sw_comp_cons;

	hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
	sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);

	/* Memory barrier to prevent the CPU from doing speculative reads of CQE
	 * / BD in the while-loop before reading hw_comp_cons. If the CQE is
	 * read before it is written by FW, then FW writes CQE and SB, and then
	 * the CPU reads the hw_comp_cons, it will use an old CQE.
	 */

	/* Loop to complete all indicated BDs */
	while ((sw_comp_cons != hw_comp_cons) && (work_done < budget)) {
		qede_rx_process_cqe(edev, fp, rxq);
		qed_chain_recycle_consumed(&rxq->rx_comp_ring);
		sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);

	/* Update producers */
	qede_update_rx_prod(edev, rxq);
static bool qede_poll_is_more_work(struct qede_fastpath *fp)
	qed_sb_update_sb_idx(fp->sb_info);

	/* *_has_*_work() reads the status block, thus we need to ensure that
	 * status block indices have been actually read (qed_sb_update_sb_idx)
	 * prior to this check (*_has_*_work) so that we won't write the
	 * "newer" value of the status block to HW (if there was a DMA right
	 * after qede_has_rx_work and if there is no rmb, the memory reading
	 * (qed_sb_update_sb_idx) may be postponed to right before *_ack_sb).
	 * In this case there will never be another interrupt until there is
	 * another update of the status block, while there is still unhandled
	 * work.
	 */

	if (likely(fp->type & QEDE_FASTPATH_RX))
		if (qede_has_rx_work(fp->rxq))

	if (fp->type & QEDE_FASTPATH_XDP)
		if (qede_txq_has_work(fp->xdp_tx))

	if (likely(fp->type & QEDE_FASTPATH_TX))
		if (qede_txq_has_work(fp->txq))

/*********************
 * NDO & API related *
 *********************/
int qede_poll(struct napi_struct *napi, int budget)
	struct qede_fastpath *fp = container_of(napi, struct qede_fastpath,
	struct qede_dev *edev = fp->edev;
	int rx_work_done = 0;

	if (likely(fp->type & QEDE_FASTPATH_TX) && qede_txq_has_work(fp->txq))
		qede_tx_int(edev, fp->txq);

	if ((fp->type & QEDE_FASTPATH_XDP) && qede_txq_has_work(fp->xdp_tx))
		qede_xdp_tx_int(edev, fp->xdp_tx);

	rx_work_done = (likely(fp->type & QEDE_FASTPATH_RX) &&
			qede_has_rx_work(fp->rxq)) ?
			qede_rx_int(fp, budget) : 0;
	if (rx_work_done < budget) {
		if (!qede_poll_is_more_work(fp)) {
			napi_complete(napi);

			/* Update and reenable interrupts */
			qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
			rx_work_done = budget;
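	/* If any XDP frames were queued on this fastpath during the poll,
	 * ring the XDP Tx doorbell once, after the producer is updated.
	 */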
		u16 xdp_prod = qed_chain_get_prod_idx(&fp->xdp_tx->tx_pbl);

		fp->xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod);
		qede_update_tx_producer(fp->xdp_tx);

	return rx_work_done;

irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie)
	struct qede_fastpath *fp = fp_cookie;

	qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/);

	napi_schedule_irqoff(&fp->napi);
/* Main transmit function */
netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
	struct qede_dev *edev = netdev_priv(ndev);
	struct netdev_queue *netdev_txq;
	struct qede_tx_queue *txq;
	struct eth_tx_1st_bd *first_bd;
	struct eth_tx_2nd_bd *second_bd = NULL;
	struct eth_tx_3rd_bd *third_bd = NULL;
	struct eth_tx_bd *tx_data_bd = NULL;
	int rc, frag_idx = 0, ipv6_ext = 0;
	bool data_split = false;

	/* Get tx-queue context and netdev index */
	txq_index = skb_get_queue_mapping(skb);
	WARN_ON(txq_index >= QEDE_TSS_COUNT(edev));
	txq = edev->fp_array[edev->fp_num_rx + txq_index].txq;
	netdev_txq = netdev_get_tx_queue(ndev, txq_index);

	WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1));

	xmit_type = qede_xmit_type(skb, &ipv6_ext);

#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
	if (qede_pkt_req_lin(skb, xmit_type)) {
		if (skb_linearize(skb)) {
				  "SKB linearization failed - silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;

	/* Fill the entry in the SW ring and the BDs in the FW ring */
	idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
	txq->sw_tx_ring.skbs[idx].skb = skb;
	first_bd = (struct eth_tx_1st_bd *)
		   qed_chain_produce(&txq->tx_pbl);
	memset(first_bd, 0, sizeof(*first_bd));
	first_bd->data.bd_flags.bitfields =
		1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;

	/* Map skb linear data for DMA and set in the first BD */
	mapping = dma_map_single(txq->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(txq->dev, mapping))) {
		DP_NOTICE(edev, "SKB mapping failed\n");
		qede_free_failed_tx_pkt(txq, first_bd, 0, false);
		qede_update_tx_producer(txq);
		return NETDEV_TX_OK;

	BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));
	/* In case there is IPv6 with extension headers or LSO we need 2nd and
	 * 3rd BDs.
	 */
	if (unlikely((xmit_type & XMIT_LSO) | ipv6_ext)) {
		second_bd = (struct eth_tx_2nd_bd *)
			qed_chain_produce(&txq->tx_pbl);
		memset(second_bd, 0, sizeof(*second_bd));

		third_bd = (struct eth_tx_3rd_bd *)
			qed_chain_produce(&txq->tx_pbl);
		memset(third_bd, 0, sizeof(*third_bd));

		/* We need to fill in additional data in second_bd... */
		tx_data_bd = (struct eth_tx_bd *)second_bd;

	if (skb_vlan_tag_present(skb)) {
		first_bd->data.vlan = cpu_to_le16(skb_vlan_tag_get(skb));
		first_bd->data.bd_flags.bitfields |=
			1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;

	/* Fill the parsing flags & params according to the requested offload */
	if (xmit_type & XMIT_L4_CSUM) {
		/* We don't re-calculate IP checksum as it is already done by
		 * the upper stack
		 */
		first_bd->data.bd_flags.bitfields |=
			1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;

		if (xmit_type & XMIT_ENC) {
			first_bd->data.bd_flags.bitfields |=
				1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
			first_bd->data.bitfields |=
				1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;

		/* Legacy FW had flipped behavior in regard to this bit -
		 * I.e., needed to set to prevent FW from touching encapsulated
		 * packets when it didn't need to.
		 */
		if (unlikely(txq->is_legacy))
			first_bd->data.bitfields ^=
				1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;

		/* If the packet is IPv6 with extension header, indicate that
		 * to FW and pass few params, since the device cracker doesn't
		 * support parsing IPv6 with extension header/s.
		 */
		if (unlikely(ipv6_ext))
			qede_set_params_for_ipv6_ext(skb, second_bd, third_bd);

	if (xmit_type & XMIT_LSO) {
		first_bd->data.bd_flags.bitfields |=
			(1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT);
		third_bd->data.lso_mss =
			cpu_to_le16(skb_shinfo(skb)->gso_size);

		if (unlikely(xmit_type & XMIT_ENC)) {
			first_bd->data.bd_flags.bitfields |=
				1 << ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;

			if (xmit_type & XMIT_ENC_GSO_L4_CSUM) {
				u8 tmp = ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;

				first_bd->data.bd_flags.bitfields |= 1 << tmp;
			hlen = qede_get_skb_hlen(skb, true);
			first_bd->data.bd_flags.bitfields |=
				1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
			hlen = qede_get_skb_hlen(skb, false);

		/* @@@TBD - if will not be removed need to check */
		third_bd->data.bitfields |=
			cpu_to_le16(1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
		/* Make life easier for FW guys who can't deal with header and
		 * data on same BD. If we need to split, use the second bd...
		 */
		if (unlikely(skb_headlen(skb) > hlen)) {
			DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
				   "TSO split header size is %d (%x:%x)\n",
				   first_bd->nbytes, first_bd->addr.hi,

			mapping = HILO_U64(le32_to_cpu(first_bd->addr.hi),
					   le32_to_cpu(first_bd->addr.lo)) +

			BD_SET_UNMAP_ADDR_LEN(tx_data_bd, mapping,
					      le16_to_cpu(first_bd->nbytes) -

			/* this marks the BD as one that has no
			 * individual mapping
			 */
			txq->sw_tx_ring.skbs[idx].flags |= QEDE_TSO_SPLIT_BD;

			first_bd->nbytes = cpu_to_le16(hlen);

			tx_data_bd = (struct eth_tx_bd *)third_bd;

		first_bd->data.bitfields |=
			(skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
			ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;

	/* Handle fragmented skb */
	/* special handle for frags inside 2nd and 3rd bds.. */
	while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) {
		rc = map_frag_to_bd(txq,
				    &skb_shinfo(skb)->frags[frag_idx],
			qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split);
			qede_update_tx_producer(txq);
			return NETDEV_TX_OK;

		if (tx_data_bd == (struct eth_tx_bd *)second_bd)
			tx_data_bd = (struct eth_tx_bd *)third_bd;

	/* map last frags into 4th, 5th .... */
	for (; frag_idx < skb_shinfo(skb)->nr_frags; frag_idx++, nbd++) {
		tx_data_bd = (struct eth_tx_bd *)
			qed_chain_produce(&txq->tx_pbl);

		memset(tx_data_bd, 0, sizeof(*tx_data_bd));

		rc = map_frag_to_bd(txq,
				    &skb_shinfo(skb)->frags[frag_idx],
			qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split);
			qede_update_tx_producer(txq);
			return NETDEV_TX_OK;

	/* update the first BD with the actual num BDs */
	first_bd->data.nbds = nbd;

	netdev_tx_sent_queue(netdev_txq, skb->len);

	skb_tx_timestamp(skb);

	/* Advance packet producer only before sending the packet since mapping
	 * of pages may fail.
	 */

	/* 'next page' entries are counted in the producer value */
	txq->tx_db.data.bd_prod =
		cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
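	/* Doorbell only when xmit_more says no further frames are pending, so
	 * that back-to-back transmits can be coalesced into a single doorbell.
	 */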
	if (!skb->xmit_more || netif_xmit_stopped(netdev_txq))
		qede_update_tx_producer(txq);

	if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl)
		     < (MAX_SKB_FRAGS + 1))) {
			qede_update_tx_producer(txq);

		netif_tx_stop_queue(netdev_txq);

		DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
			   "Stop queue was called\n");
		/* paired memory barrier is in qede_tx_int(), we have to keep
		 * ordering of set_bit() in netif_tx_stop_queue() and read of

		if ((qed_chain_get_elem_left(&txq->tx_pbl) >=
		     (MAX_SKB_FRAGS + 1)) &&
		    (edev->state == QEDE_STATE_OPEN)) {
			netif_tx_wake_queue(netdev_txq);
			DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
				   "Wake queue was called\n");

	return NETDEV_TX_OK;
/* 8B udp header + 8B base tunnel header + 32B option length */
#define QEDE_MAX_TUN_HDR_LEN 48

netdev_features_t qede_features_check(struct sk_buff *skb,
				      struct net_device *dev,
				      netdev_features_t features)
	if (skb->encapsulation) {
		switch (vlan_get_protocol(skb)) {
		case htons(ETH_P_IP):
			l4_proto = ip_hdr(skb)->protocol;
		case htons(ETH_P_IPV6):
			l4_proto = ipv6_hdr(skb)->nexthdr;

		/* Disable offloads for geneve tunnels, as HW can't parse
		 * the geneve header which has option length greater than 32B.
		 */
		if ((l4_proto == IPPROTO_UDP) &&
		    ((skb_inner_mac_header(skb) -
		      skb_transport_header(skb)) > QEDE_MAX_TUN_HDR_LEN))
			return features & ~(NETIF_F_CSUM_MASK |
					    NETIF_F_GSO_MASK);
	}

	return features;
}