1 /* bnx2x_cmn.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2011 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <linux/etherdevice.h>
21 #include <linux/if_vlan.h>
22 #include <linux/interrupt.h>
25 #include <net/ip6_checksum.h>
26 #include <linux/firmware.h>
27 #include <linux/prefetch.h>
28 #include "bnx2x_cmn.h"
29 #include "bnx2x_init.h"
35 * bnx2x_bz_fp - zero content of the fastpath structure.
38 * @index: fastpath index to be zeroed
40 * Makes sure the contents of the bp->fp[index].napi is kept intact.
43 static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
45 struct bnx2x_fastpath *fp = &bp->fp[index];
46 struct napi_struct orig_napi = fp->napi;
47 /* bzero bnx2x_fastpath contents */
48 memset(fp, 0, sizeof(*fp));
50 /* Restore the NAPI object as it has been already initialized */
56 fp->max_cos = bp->max_cos;
58 /* Special queues support only one CoS */
62 * Set the TPA flag for each queue. The TPA flag determines the queue's
63 * minimal size, so it must be set prior to queue memory allocation
65 fp->disable_tpa = ((bp->flags & TPA_ENABLE_FLAG) == 0);
68 /* We don't want TPA on an FCoE L2 ring */
75 * bnx2x_move_fp - move content of the fastpath structure.
78 * @from: source FP index
79 * @to: destination FP index
81 * Makes sure the contents of the bp->fp[to].napi is kept
82 * intact. This is done by first copying the napi struct from
83 * the target to the source, and then mem copying the entire
84 * source onto the target
86 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
88 struct bnx2x_fastpath *from_fp = &bp->fp[from];
89 struct bnx2x_fastpath *to_fp = &bp->fp[to];
91 /* Copy the NAPI object as it has been already initialized */
92 from_fp->napi = to_fp->napi;
94 /* Move bnx2x_fastpath contents */
95 memcpy(to_fp, from_fp, sizeof(*to_fp));
99 int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
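/* Example of the no-MCP accounting done in bnx2x_nic_load() below: the first
 * function to load on a path sees load_count[path][0] == 1 and requests
 * FW_MSG_CODE_DRV_LOAD_COMMON, the first function on a given port sees
 * load_count[path][1 + port] == 1 and requests ..._LOAD_PORT, and every other
 * function requests ..._LOAD_FUNCTION.
 */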
101 /* free skb in the packet ring at pos idx
102 * return idx of last bd freed
104 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
105 u16 idx, unsigned int *pkts_compl,
106 unsigned int *bytes_compl)
108 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
109 struct eth_tx_start_bd *tx_start_bd;
110 struct eth_tx_bd *tx_data_bd;
111 struct sk_buff *skb = tx_buf->skb;
112 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
115 /* prefetch skb end pointer to speed up dev_kfree_skb() */
118 DP(BNX2X_MSG_FP, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
119 txdata->txq_index, idx, tx_buf, skb);
122 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
123 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
124 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
125 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
128 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
129 #ifdef BNX2X_STOP_ON_ERROR
130 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
131 BNX2X_ERR("BAD nbd!\n");
135 new_cons = nbd + tx_buf->first_bd;
137 /* Get the next bd */
138 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
140 /* Skip a parse bd... */
142 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
144 /* ...and the TSO split header bd since they have no mapping */
145 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
147 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
153 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
154 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
155 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
156 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
158 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
165 (*bytes_compl) += skb->len;
167 dev_kfree_skb_any(skb);
168 tx_buf->first_bd = 0;
174 int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
176 struct netdev_queue *txq;
177 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
178 unsigned int pkts_compl = 0, bytes_compl = 0;
180 #ifdef BNX2X_STOP_ON_ERROR
181 if (unlikely(bp->panic))
185 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
186 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
187 sw_cons = txdata->tx_pkt_cons;
189 while (sw_cons != hw_cons) {
192 pkt_cons = TX_BD(sw_cons);
194 DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u sw_cons %u "
196 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
198 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
199 &pkts_compl, &bytes_compl);
204 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
206 txdata->tx_pkt_cons = sw_cons;
207 txdata->tx_bd_cons = bd_cons;
209 /* Need to make the tx_bd_cons update visible to start_xmit()
210 * before checking for netif_tx_queue_stopped(). Without the
211 * memory barrier, there is a small possibility that
212 * start_xmit() will miss it and cause the queue to be stopped
214 * On the other hand we need an rmb() here to ensure the proper
215 * ordering of bit testing in the following
216 * netif_tx_queue_stopped(txq) call.
220 if (unlikely(netif_tx_queue_stopped(txq))) {
221 /* Taking tx_lock() is needed to prevent re-enabling the queue
222 * while it's empty. This could have happened if rx_action() gets
223 * suspended in bnx2x_tx_int() after the condition before
224 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
226 * stops the queue->sees fresh tx_bd_cons->releases the queue->
227 * sends some packets consuming the whole queue again->
231 __netif_tx_lock(txq, smp_processor_id());
233 if ((netif_tx_queue_stopped(txq)) &&
234 (bp->state == BNX2X_STATE_OPEN) &&
235 (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3))
236 netif_tx_wake_queue(txq);
238 __netif_tx_unlock(txq);
243 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
246 u16 last_max = fp->last_max_sge;
248 if (SUB_S16(idx, last_max) > 0)
249 fp->last_max_sge = idx;
252 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
253 struct eth_fast_path_rx_cqe *fp_cqe)
255 struct bnx2x *bp = fp->bp;
256 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
257 le16_to_cpu(fp_cqe->len_on_bd)) >>
259 u16 last_max, last_elem, first_elem;
266 /* First mark all used pages */
267 for (i = 0; i < sge_len; i++)
268 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
269 RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));
271 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
272 sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
274 /* Here we assume that the last SGE index is the biggest */
275 prefetch((void *)(fp->sge_mask));
276 bnx2x_update_last_max_sge(fp,
277 le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
279 last_max = RX_SGE(fp->last_max_sge);
280 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
281 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
283 /* If ring is not full */
284 if (last_elem + 1 != first_elem)
287 /* Now update the prod */
288 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
289 if (likely(fp->sge_mask[i]))
292 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
293 delta += BIT_VEC64_ELEM_SZ;
297 fp->rx_sge_prod += delta;
298 /* clear page-end entries */
299 bnx2x_clear_sge_mask_next_elems(fp);
302 DP(NETIF_MSG_RX_STATUS,
303 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
304 fp->last_max_sge, fp->rx_sge_prod);
307 /* Set Toeplitz hash value in the skb using the value from the
308 * CQE (calculated by HW).
310 static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
311 const struct eth_fast_path_rx_cqe *cqe)
313 /* Set Toeplitz hash from CQE */
314 if ((bp->dev->features & NETIF_F_RXHASH) &&
315 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
316 return le32_to_cpu(cqe->rss_hash_result);
320 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
322 struct eth_fast_path_rx_cqe *cqe)
324 struct bnx2x *bp = fp->bp;
325 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
326 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
327 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
329 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
330 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
332 /* print error if current state != stop */
333 if (tpa_info->tpa_state != BNX2X_TPA_STOP)
334 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
336 /* Try to map an empty data buffer from the aggregation info */
337 mapping = dma_map_single(&bp->pdev->dev,
338 first_buf->data + NET_SKB_PAD,
339 fp->rx_buf_size, DMA_FROM_DEVICE);
341 * ...if it fails - move the skb from the consumer to the producer
342 * and set the current aggregation state as ERROR to drop it
343 * when TPA_STOP arrives.
346 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
347 /* Move the BD from the consumer to the producer */
348 bnx2x_reuse_rx_data(fp, cons, prod);
349 tpa_info->tpa_state = BNX2X_TPA_ERROR;
353 /* move empty data from pool to prod */
354 prod_rx_buf->data = first_buf->data;
355 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
356 /* point prod_bd to new data */
357 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
358 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
360 /* move partial skb from cons to pool (don't unmap yet) */
361 *first_buf = *cons_rx_buf;
363 /* mark bin state as START */
364 tpa_info->parsing_flags =
365 le16_to_cpu(cqe->pars_flags.flags);
366 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
367 tpa_info->tpa_state = BNX2X_TPA_START;
368 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
369 tpa_info->placement_offset = cqe->placement_offset;
370 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe);
372 #ifdef BNX2X_STOP_ON_ERROR
373 fp->tpa_queue_used |= (1 << queue);
374 #ifdef _ASM_GENERIC_INT_L64_H
375 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
377 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
383 /* Timestamp option length allowed for TPA aggregation:
385 * nop nop kind length echo val
387 #define TPA_TSTAMP_OPT_LEN 12
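/* The 12 bytes above break down as: NOP (1) + NOP (1) + kind (1) + length (1)
 * + timestamp value (4) + timestamp echo reply (4) = 12 bytes.
 */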
389 * bnx2x_set_lro_mss - calculate the approximate value of the MSS
392 * @parsing_flags: parsing flags from the START CQE
393 * @len_on_bd: total length of the first packet for the
396 * Approximate value of the MSS for this aggregation, calculated using
397 * its first packet.
399 static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
403 * TPA aggregation won't have either IP options or TCP options
404 * other than timestamp or IPv6 extension headers.
406 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
408 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
409 PRS_FLAG_OVERETH_IPV6)
410 hdrs_len += sizeof(struct ipv6hdr);
412 hdrs_len += sizeof(struct iphdr);
415 /* Check if there was a TCP timestamp; if there is one, it will
416 * always be 12 bytes long: nop nop kind length echo val.
418 * Otherwise FW would close the aggregation.
420 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
421 hdrs_len += TPA_TSTAMP_OPT_LEN;
423 return len_on_bd - hdrs_len;
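/* Worked example: for an IPv4 TCP aggregation carrying timestamps,
 * hdrs_len = ETH_HLEN (14) + sizeof(struct iphdr) (20) +
 * sizeof(struct tcphdr) (20) + TPA_TSTAMP_OPT_LEN (12) = 66,
 * so the MSS returned above is len_on_bd - 66.
 */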
426 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
427 u16 queue, struct sk_buff *skb,
428 struct eth_end_agg_rx_cqe *cqe,
431 struct sw_rx_page *rx_pg, old_rx_pg;
432 u32 i, frag_len, frag_size, pages;
435 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
436 u16 len_on_bd = tpa_info->len_on_bd;
438 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
439 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
441 /* This is needed in order to enable forwarding support */
443 skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
444 tpa_info->parsing_flags, len_on_bd);
446 #ifdef BNX2X_STOP_ON_ERROR
447 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
448 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
450 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
456 /* Run through the SGL and compose the fragmented skb */
457 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
458 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
460 /* FW gives the indices of the SGE as if the ring is an array
461 (meaning that "next" element will consume 2 indices) */
462 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
463 rx_pg = &fp->rx_page_ring[sge_idx];
466 /* If we fail to allocate a substitute page, we simply stop
467 where we are and drop the whole packet */
468 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
470 fp->eth_q_stats.rx_skb_alloc_failed++;
474 /* Unmap the page as we are going to pass it to the stack */
475 dma_unmap_page(&bp->pdev->dev,
476 dma_unmap_addr(&old_rx_pg, mapping),
477 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
479 /* Add one frag and update the appropriate fields in the skb */
480 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
482 skb->data_len += frag_len;
483 skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE;
484 skb->len += frag_len;
486 frag_size -= frag_len;
492 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
493 u16 queue, struct eth_end_agg_rx_cqe *cqe,
496 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
497 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
498 u32 pad = tpa_info->placement_offset;
499 u16 len = tpa_info->len_on_bd;
500 struct sk_buff *skb = NULL;
501 u8 *data = rx_buf->data;
504 u8 old_tpa_state = tpa_info->tpa_state;
506 tpa_info->tpa_state = BNX2X_TPA_STOP;
508 /* If there was an error during the handling of the TPA_START -
509 * drop this aggregation.
511 if (old_tpa_state == BNX2X_TPA_ERROR)
514 /* Try to allocate the new data */
515 new_data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
517 /* Unmap skb in the pool anyway, as we are going to change
518 pool entry status to BNX2X_TPA_STOP even if new skb allocation fails.
520 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
521 fp->rx_buf_size, DMA_FROM_DEVICE);
522 if (likely(new_data))
523 skb = build_skb(data);
527 #ifdef BNX2X_STOP_ON_ERROR
528 if (pad + len > fp->rx_buf_size) {
529 BNX2X_ERR("skb_put is about to fail... "
530 "pad %d len %d rx_buf_size %d\n",
531 pad, len, fp->rx_buf_size);
537 skb_reserve(skb, pad + NET_SKB_PAD);
539 skb->rxhash = tpa_info->rxhash;
541 skb->protocol = eth_type_trans(skb, bp->dev);
542 skb->ip_summed = CHECKSUM_UNNECESSARY;
544 if (!bnx2x_fill_frag_skb(bp, fp, queue, skb, cqe, cqe_idx)) {
545 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
546 __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
547 napi_gro_receive(&fp->napi, skb);
549 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
550 " - dropping packet!\n");
551 dev_kfree_skb_any(skb);
555 /* put new data in bin */
556 rx_buf->data = new_data;
562 /* drop the packet and keep the buffer in the bin */
563 DP(NETIF_MSG_RX_STATUS,
564 "Failed to allocate or map a new skb - dropping packet!\n");
565 fp->eth_q_stats.rx_skb_alloc_failed++;
569 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
571 struct bnx2x *bp = fp->bp;
572 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
573 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
576 #ifdef BNX2X_STOP_ON_ERROR
577 if (unlikely(bp->panic))
581 /* CQ "next element" is the same size as a regular element,
582 which is why it's ok here */
583 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
584 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
587 bd_cons = fp->rx_bd_cons;
588 bd_prod = fp->rx_bd_prod;
589 bd_prod_fw = bd_prod;
590 sw_comp_cons = fp->rx_comp_cons;
591 sw_comp_prod = fp->rx_comp_prod;
593 /* Memory barrier necessary as speculative reads of the rx
594 * buffer can be ahead of the index in the status block
598 DP(NETIF_MSG_RX_STATUS,
599 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
600 fp->index, hw_comp_cons, sw_comp_cons);
602 while (sw_comp_cons != hw_comp_cons) {
603 struct sw_rx_bd *rx_buf = NULL;
605 union eth_rx_cqe *cqe;
606 struct eth_fast_path_rx_cqe *cqe_fp;
608 enum eth_rx_cqe_type cqe_fp_type;
612 #ifdef BNX2X_STOP_ON_ERROR
613 if (unlikely(bp->panic))
617 comp_ring_cons = RCQ_BD(sw_comp_cons);
618 bd_prod = RX_BD(bd_prod);
619 bd_cons = RX_BD(bd_cons);
621 cqe = &fp->rx_comp_ring[comp_ring_cons];
622 cqe_fp = &cqe->fast_path_cqe;
623 cqe_fp_flags = cqe_fp->type_error_flags;
624 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
626 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
627 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
628 cqe_fp_flags, cqe_fp->status_flags,
629 le32_to_cpu(cqe_fp->rss_hash_result),
630 le16_to_cpu(cqe_fp->vlan_tag), le16_to_cpu(cqe_fp->pkt_len));
632 /* is this a slowpath msg? */
633 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
634 bnx2x_sp_event(fp, cqe);
637 rx_buf = &fp->rx_buf_ring[bd_cons];
640 if (!CQE_TYPE_FAST(cqe_fp_type)) {
641 #ifdef BNX2X_STOP_ON_ERROR
643 if (fp->disable_tpa &&
644 (CQE_TYPE_START(cqe_fp_type) ||
645 CQE_TYPE_STOP(cqe_fp_type)))
646 BNX2X_ERR("START/STOP packet while "
647 "disable_tpa type %x\n",
648 CQE_TYPE(cqe_fp_type));
651 if (CQE_TYPE_START(cqe_fp_type)) {
652 u16 queue = cqe_fp->queue_index;
653 DP(NETIF_MSG_RX_STATUS,
654 "calling tpa_start on queue %d\n",
657 bnx2x_tpa_start(fp, queue,
663 cqe->end_agg_cqe.queue_index;
664 DP(NETIF_MSG_RX_STATUS,
665 "calling tpa_stop on queue %d\n",
668 bnx2x_tpa_stop(bp, fp, queue,
671 #ifdef BNX2X_STOP_ON_ERROR
676 bnx2x_update_sge_prod(fp, cqe_fp);
681 len = le16_to_cpu(cqe_fp->pkt_len);
682 pad = cqe_fp->placement_offset;
683 dma_sync_single_for_cpu(&bp->pdev->dev,
684 dma_unmap_addr(rx_buf, mapping),
685 pad + RX_COPY_THRESH,
688 prefetch(data + pad); /* speed up eth_type_trans() */
689 /* is this an error packet? */
690 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
692 "ERROR flags %x rx packet %u\n",
693 cqe_fp_flags, sw_comp_cons);
694 fp->eth_q_stats.rx_err_discard_pkt++;
698 /* Since we don't have a jumbo ring
699 * copy small packets if mtu > 1500
701 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
702 (len <= RX_COPY_THRESH)) {
703 skb = netdev_alloc_skb_ip_align(bp->dev, len);
706 "ERROR packet dropped because of alloc failure\n");
707 fp->eth_q_stats.rx_skb_alloc_failed++;
710 memcpy(skb->data, data + pad, len);
711 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
713 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
714 dma_unmap_single(&bp->pdev->dev,
715 dma_unmap_addr(rx_buf, mapping),
718 skb = build_skb(data);
719 if (unlikely(!skb)) {
721 fp->eth_q_stats.rx_skb_alloc_failed++;
724 skb_reserve(skb, pad);
727 "ERROR packet dropped because "
728 "of alloc failure\n");
729 fp->eth_q_stats.rx_skb_alloc_failed++;
731 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
737 skb->protocol = eth_type_trans(skb, bp->dev);
739 /* Set Toeplitz hash for a non-LRO skb */
740 skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp);
742 skb_checksum_none_assert(skb);
744 if (bp->dev->features & NETIF_F_RXCSUM) {
746 if (likely(BNX2X_RX_CSUM_OK(cqe)))
747 skb->ip_summed = CHECKSUM_UNNECESSARY;
749 fp->eth_q_stats.hw_csum_err++;
752 skb_record_rx_queue(skb, fp->rx_queue);
754 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
756 __vlan_hwaccel_put_tag(skb,
757 le16_to_cpu(cqe_fp->vlan_tag));
758 napi_gro_receive(&fp->napi, skb);
764 bd_cons = NEXT_RX_IDX(bd_cons);
765 bd_prod = NEXT_RX_IDX(bd_prod);
766 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
769 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
770 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
772 if (rx_pkt == budget)
776 fp->rx_bd_cons = bd_cons;
777 fp->rx_bd_prod = bd_prod_fw;
778 fp->rx_comp_cons = sw_comp_cons;
779 fp->rx_comp_prod = sw_comp_prod;
781 /* Update producers */
782 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
785 fp->rx_pkt += rx_pkt;
791 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
793 struct bnx2x_fastpath *fp = fp_cookie;
794 struct bnx2x *bp = fp->bp;
797 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
798 "[fp %d fw_sd %d igusb %d]\n",
799 fp->index, fp->fw_sb_id, fp->igu_sb_id);
800 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
802 #ifdef BNX2X_STOP_ON_ERROR
803 if (unlikely(bp->panic))
807 /* Handle Rx and Tx according to MSI-X vector */
808 prefetch(fp->rx_cons_sb);
810 for_each_cos_in_tx_queue(fp, cos)
811 prefetch(fp->txdata[cos].tx_cons_sb);
813 prefetch(&fp->sb_running_index[SM_RX_ID]);
814 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
819 /* HW Lock for shared dual port PHYs */
820 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
822 mutex_lock(&bp->port.phy_mutex);
824 if (bp->port.need_hw_lock)
825 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
828 void bnx2x_release_phy_lock(struct bnx2x *bp)
830 if (bp->port.need_hw_lock)
831 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
833 mutex_unlock(&bp->port.phy_mutex);
836 /* calculates MF speed according to current linespeed and MF configuration */
837 u16 bnx2x_get_mf_speed(struct bnx2x *bp)
839 u16 line_speed = bp->link_vars.line_speed;
841 u16 maxCfg = bnx2x_extract_max_cfg(bp,
842 bp->mf_config[BP_VN(bp)]);
844 /* Calculate the current MAX line speed limit for the MF
848 line_speed = (line_speed * maxCfg) / 100;
850 u16 vn_max_rate = maxCfg * 100;
852 if (vn_max_rate < line_speed)
853 line_speed = vn_max_rate;
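/* Arithmetic example for the two limits above, with maxCfg == 50: the first
 * form scales a 10000 Mbps link to 10000 * 50 / 100 = 5000 Mbps (maxCfg used
 * as a percentage), while the second treats maxCfg as units of 100 Mbps,
 * giving vn_max_rate = 50 * 100 = 5000 Mbps.
 */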
861 * bnx2x_fill_report_data - fill link report data to report
864 * @data: link state to update
866 * It uses non-atomic bit operations because it is called under the mutex.
868 static inline void bnx2x_fill_report_data(struct bnx2x *bp,
869 struct bnx2x_link_report_data *data)
871 u16 line_speed = bnx2x_get_mf_speed(bp);
873 memset(data, 0, sizeof(*data));
875 /* Fill the report data: effective line speed */
876 data->line_speed = line_speed;
879 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
880 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
881 &data->link_report_flags);
884 if (bp->link_vars.duplex == DUPLEX_FULL)
885 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
887 /* Rx Flow Control is ON */
888 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
889 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
891 /* Tx Flow Control is ON */
892 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
893 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
897 * bnx2x_link_report - report link status to OS.
901 * Calls the __bnx2x_link_report() under the same locking scheme
902 * as the link/PHY state managing code to ensure a consistent link
906 void bnx2x_link_report(struct bnx2x *bp)
908 bnx2x_acquire_phy_lock(bp);
909 __bnx2x_link_report(bp);
910 bnx2x_release_phy_lock(bp);
914 * __bnx2x_link_report - report link status to OS.
918 * Non-atomic implementation.
919 * Should be called under the phy_lock.
921 void __bnx2x_link_report(struct bnx2x *bp)
923 struct bnx2x_link_report_data cur_data;
927 bnx2x_read_mf_cfg(bp);
929 /* Read the current link report info */
930 bnx2x_fill_report_data(bp, &cur_data);
932 /* Don't report link down or exactly the same link status twice */
933 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
934 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
935 &bp->last_reported_link.link_report_flags) &&
936 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
937 &cur_data.link_report_flags)))
942 /* We are going to report new link parameters now -
943 * remember the current data for the next time.
945 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
947 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
948 &cur_data.link_report_flags)) {
949 netif_carrier_off(bp->dev);
950 netdev_err(bp->dev, "NIC Link is Down\n");
956 netif_carrier_on(bp->dev);
958 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
959 &cur_data.link_report_flags))
964 /* Handle the FC at the end so that only these flags could
965 * possibly be set. This way we may easily check if there is no FC
968 if (cur_data.link_report_flags) {
969 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
970 &cur_data.link_report_flags)) {
971 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
972 &cur_data.link_report_flags))
973 flow = "ON - receive & transmit";
975 flow = "ON - receive";
977 flow = "ON - transmit";
982 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
983 cur_data.line_speed, duplex, flow);
987 void bnx2x_init_rx_rings(struct bnx2x *bp)
989 int func = BP_FUNC(bp);
993 /* Allocate TPA resources */
994 for_each_rx_queue(bp, j) {
995 struct bnx2x_fastpath *fp = &bp->fp[j];
998 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1000 if (!fp->disable_tpa) {
1001 /* Fill the per-aggregation pool */
1002 for (i = 0; i < MAX_AGG_QS(bp); i++) {
1003 struct bnx2x_agg_info *tpa_info =
1005 struct sw_rx_bd *first_buf =
1006 &tpa_info->first_buf;
1008 first_buf->data = kmalloc(fp->rx_buf_size + NET_SKB_PAD,
1010 if (!first_buf->data) {
1011 BNX2X_ERR("Failed to allocate TPA "
1012 "skb pool for queue[%d] - "
1013 "disabling TPA on this "
1015 bnx2x_free_tpa_pool(bp, fp, i);
1016 fp->disable_tpa = 1;
1019 dma_unmap_addr_set(first_buf, mapping, 0);
1020 tpa_info->tpa_state = BNX2X_TPA_STOP;
1023 /* "next page" elements initialization */
1024 bnx2x_set_next_page_sgl(fp);
1026 /* set SGEs bit mask */
1027 bnx2x_init_sge_ring_bit_mask(fp);
1029 /* Allocate SGEs and initialize the ring elements */
1030 for (i = 0, ring_prod = 0;
1031 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1033 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
1034 BNX2X_ERR("was only able to allocate "
1036 BNX2X_ERR("disabling TPA for "
1038 /* Cleanup already allocated elements */
1039 bnx2x_free_rx_sge_range(bp, fp,
1041 bnx2x_free_tpa_pool(bp, fp,
1043 fp->disable_tpa = 1;
1047 ring_prod = NEXT_SGE_IDX(ring_prod);
1050 fp->rx_sge_prod = ring_prod;
1054 for_each_rx_queue(bp, j) {
1055 struct bnx2x_fastpath *fp = &bp->fp[j];
1059 /* Activate BD ring */
1061 * this will generate an interrupt (to the TSTORM)
1062 * must only be done after chip is initialized
1064 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1070 if (CHIP_IS_E1(bp)) {
1071 REG_WR(bp, BAR_USTRORM_INTMEM +
1072 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1073 U64_LO(fp->rx_comp_mapping));
1074 REG_WR(bp, BAR_USTRORM_INTMEM +
1075 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1076 U64_HI(fp->rx_comp_mapping));
1081 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1086 for_each_tx_queue(bp, i) {
1087 struct bnx2x_fastpath *fp = &bp->fp[i];
1088 for_each_cos_in_tx_queue(fp, cos) {
1089 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
1090 unsigned pkts_compl = 0, bytes_compl = 0;
1092 u16 sw_prod = txdata->tx_pkt_prod;
1093 u16 sw_cons = txdata->tx_pkt_cons;
1095 while (sw_cons != sw_prod) {
1096 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1097 &pkts_compl, &bytes_compl);
1100 netdev_tx_reset_queue(
1101 netdev_get_tx_queue(bp->dev, txdata->txq_index));
1106 static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1108 struct bnx2x *bp = fp->bp;
1111 /* ring wasn't allocated */
1112 if (fp->rx_buf_ring == NULL)
1115 for (i = 0; i < NUM_RX_BD; i++) {
1116 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1117 u8 *data = rx_buf->data;
1121 dma_unmap_single(&bp->pdev->dev,
1122 dma_unmap_addr(rx_buf, mapping),
1123 fp->rx_buf_size, DMA_FROM_DEVICE);
1125 rx_buf->data = NULL;
1130 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1134 for_each_rx_queue(bp, j) {
1135 struct bnx2x_fastpath *fp = &bp->fp[j];
1137 bnx2x_free_rx_bds(fp);
1139 if (!fp->disable_tpa)
1140 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1144 void bnx2x_free_skbs(struct bnx2x *bp)
1146 bnx2x_free_tx_skbs(bp);
1147 bnx2x_free_rx_skbs(bp);
1150 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1152 /* load old values */
1153 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1155 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1156 /* leave all but MAX value */
1157 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1159 /* set new MAX value */
1160 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1161 & FUNC_MF_CFG_MAX_BW_MASK;
1163 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1168 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1170 * @bp: driver handle
1171 * @nvecs: number of vectors to be released
1173 static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1177 if (nvecs == offset)
1179 free_irq(bp->msix_table[offset].vector, bp->dev);
1180 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1181 bp->msix_table[offset].vector);
1184 if (nvecs == offset)
1189 for_each_eth_queue(bp, i) {
1190 if (nvecs == offset)
1192 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d "
1193 "irq\n", i, bp->msix_table[offset].vector);
1195 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1199 void bnx2x_free_irq(struct bnx2x *bp)
1201 if (bp->flags & USING_MSIX_FLAG)
1202 bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
1204 else if (bp->flags & USING_MSI_FLAG)
1205 free_irq(bp->pdev->irq, bp->dev);
1207 free_irq(bp->pdev->irq, bp->dev);
1210 int bnx2x_enable_msix(struct bnx2x *bp)
1212 int msix_vec = 0, i, rc, req_cnt;
1214 bp->msix_table[msix_vec].entry = msix_vec;
1215 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
1216 bp->msix_table[0].entry);
1220 bp->msix_table[msix_vec].entry = msix_vec;
1221 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
1222 bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
1225 /* We need separate vectors for ETH queues only (not FCoE) */
1226 for_each_eth_queue(bp, i) {
1227 bp->msix_table[msix_vec].entry = msix_vec;
1228 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
1229 "(fastpath #%u)\n", msix_vec, msix_vec, i);
1233 req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_PRESENT + 1;
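/* Example, assuming CNIC_PRESENT is 1: with 8 ETH queues,
 * req_cnt = 8 + 1 + 1 = 10 -- entry 0 for the slowpath, one entry for CNIC
 * and one per ETH fastpath, matching the msix_table filled in above.
 */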
1235 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
1238 * reconfigure number of tx/rx queues according to available
1241 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
1242 /* how many fewer vectors will we have? */
1243 int diff = req_cnt - rc;
1246 "Trying to use less MSI-X vectors: %d\n", rc);
1248 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1252 "MSI-X is not attainable rc %d\n", rc);
1256 * decrease number of queues by number of unallocated entries
1258 bp->num_queues -= diff;
1260 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
1263 /* fall back to INTx if not enough memory */
1265 bp->flags |= DISABLE_MSI_FLAG;
1266 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
1270 bp->flags |= USING_MSIX_FLAG;
1275 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1277 int i, rc, offset = 0;
1279 rc = request_irq(bp->msix_table[offset++].vector,
1280 bnx2x_msix_sp_int, 0,
1281 bp->dev->name, bp->dev);
1283 BNX2X_ERR("request sp irq failed\n");
1290 for_each_eth_queue(bp, i) {
1291 struct bnx2x_fastpath *fp = &bp->fp[i];
1292 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1295 rc = request_irq(bp->msix_table[offset].vector,
1296 bnx2x_msix_fp_int, 0, fp->name, fp);
1298 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1299 bp->msix_table[offset].vector, rc);
1300 bnx2x_free_msix_irqs(bp, offset);
1307 i = BNX2X_NUM_ETH_QUEUES(bp);
1308 offset = 1 + CNIC_PRESENT;
1309 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
1311 bp->msix_table[0].vector,
1312 0, bp->msix_table[offset].vector,
1313 i - 1, bp->msix_table[offset + i - 1].vector);
1318 int bnx2x_enable_msi(struct bnx2x *bp)
1322 rc = pci_enable_msi(bp->pdev);
1324 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
1327 bp->flags |= USING_MSI_FLAG;
1332 static int bnx2x_req_irq(struct bnx2x *bp)
1334 unsigned long flags;
1337 if (bp->flags & USING_MSI_FLAG)
1340 flags = IRQF_SHARED;
1342 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
1343 bp->dev->name, bp->dev);
1347 static inline int bnx2x_setup_irqs(struct bnx2x *bp)
1350 if (bp->flags & USING_MSIX_FLAG) {
1351 rc = bnx2x_req_msix_irqs(bp);
1356 rc = bnx2x_req_irq(bp);
1358 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1361 if (bp->flags & USING_MSI_FLAG) {
1362 bp->dev->irq = bp->pdev->irq;
1363 netdev_info(bp->dev, "using MSI IRQ %d\n",
1371 static inline void bnx2x_napi_enable(struct bnx2x *bp)
1375 for_each_rx_queue(bp, i)
1376 napi_enable(&bnx2x_fp(bp, i, napi));
1379 static inline void bnx2x_napi_disable(struct bnx2x *bp)
1383 for_each_rx_queue(bp, i)
1384 napi_disable(&bnx2x_fp(bp, i, napi));
1387 void bnx2x_netif_start(struct bnx2x *bp)
1389 if (netif_running(bp->dev)) {
1390 bnx2x_napi_enable(bp);
1391 bnx2x_int_enable(bp);
1392 if (bp->state == BNX2X_STATE_OPEN)
1393 netif_tx_wake_all_queues(bp->dev);
1397 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1399 bnx2x_int_disable_sync(bp, disable_hw);
1400 bnx2x_napi_disable(bp);
1403 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1405 struct bnx2x *bp = netdev_priv(dev);
1409 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1410 u16 ether_type = ntohs(hdr->h_proto);
1412 /* Skip VLAN tag if present */
1413 if (ether_type == ETH_P_8021Q) {
1414 struct vlan_ethhdr *vhdr =
1415 (struct vlan_ethhdr *)skb->data;
1417 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1420 /* If ethertype is FCoE or FIP - use FCoE ring */
1421 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1422 return bnx2x_fcoe_tx(bp, txq_index);
1425 /* select a non-FCoE queue */
1426 return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
1429 void bnx2x_set_num_queues(struct bnx2x *bp)
1431 switch (bp->multi_mode) {
1432 case ETH_RSS_MODE_DISABLED:
1435 case ETH_RSS_MODE_REGULAR:
1436 bp->num_queues = bnx2x_calc_num_queues(bp);
1445 /* override in iSCSI SD mode */
1446 if (IS_MF_ISCSI_SD(bp))
1449 /* Add special queues */
1450 bp->num_queues += NON_ETH_CONTEXT_USE;
1454 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1456 * @bp: Driver handle
1458 * We currently support at most 16 Tx queues for each CoS, thus we will
1459 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1462 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1463 * index after all ETH L2 indices.
1465 * If the actual number of Tx queues (for each CoS) is less than 16 then there
1466 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1467 * 16..31, ...) with indices that are not coupled with any real Tx queue.
1469 * The proper configuration of skb->queue_mapping is handled by
1470 * bnx2x_select_queue() and __skb_tx_hash().
1472 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1473 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
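 *
 * Example (assuming MAX_TXQS_PER_COS is the 16 mentioned above): with
 * max_cos = 3 and 8 ETH L2 queues, tx = 3 * 16 = 48 index slots are
 * reserved, of which only 0..7, 16..23 and 32..39 map to real Tx queues;
 * the rest of each group of 16 are the holes described above.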
1475 static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
1479 tx = MAX_TXQS_PER_COS * bp->max_cos;
1480 rx = BNX2X_NUM_ETH_QUEUES(bp);
1482 /* account for fcoe queue */
1490 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1492 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1495 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1497 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1501 DP(NETIF_MSG_DRV, "Setting real num queues to (tx, rx) (%d, %d)\n",
1507 static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1511 for_each_queue(bp, i) {
1512 struct bnx2x_fastpath *fp = &bp->fp[i];
1515 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1518 * Although there are no IP frames expected to arrive to
1519 * this ring we still want to add an
1520 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1523 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
1526 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
1527 IP_HEADER_ALIGNMENT_PADDING +
1530 BNX2X_FW_RX_ALIGN_END;
1531 /* Note: rx_buf_size doesn't take into account NET_SKB_PAD */
1535 static inline int bnx2x_init_rss_pf(struct bnx2x *bp)
1538 u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
1539 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1542 * Prepare the initial contents of the indirection table if RSS is
1545 if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
1546 for (i = 0; i < sizeof(ind_table); i++)
1549 ethtool_rxfh_indir_default(i, num_eth_queues);
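/* ethtool_rxfh_indir_default(i, n) simply returns i % n, so with e.g. 8 ETH
 * queues the indirection table cycles 0, 1, ..., 7, 0, 1, ... over all
 * T_ETH_INDIRECTION_TABLE_SIZE entries.
 */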
1553 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
1554 * per-port, so if explicit configuration is needed, do it only
1557 * For 57712 and newer on the other hand it's a per-function
1560 return bnx2x_config_rss_pf(bp, ind_table,
1561 bp->port.pmf || !CHIP_IS_E1x(bp));
1564 int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash)
1566 struct bnx2x_config_rss_params params = {0};
1569 /* Although RSS is meaningless when there is a single HW queue we
1570 * still need it enabled in order to have HW Rx hash generated.
1572 * if (!is_eth_multi(bp))
1573 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
1576 params.rss_obj = &bp->rss_conf_obj;
1578 __set_bit(RAMROD_COMP_WAIT, ¶ms.ramrod_flags);
1581 switch (bp->multi_mode) {
1582 case ETH_RSS_MODE_DISABLED:
1583 __set_bit(BNX2X_RSS_MODE_DISABLED, ¶ms.rss_flags);
1585 case ETH_RSS_MODE_REGULAR:
1586 __set_bit(BNX2X_RSS_MODE_REGULAR, ¶ms.rss_flags);
1588 case ETH_RSS_MODE_VLAN_PRI:
1589 __set_bit(BNX2X_RSS_MODE_VLAN_PRI, ¶ms.rss_flags);
1591 case ETH_RSS_MODE_E1HOV_PRI:
1592 __set_bit(BNX2X_RSS_MODE_E1HOV_PRI, ¶ms.rss_flags);
1594 case ETH_RSS_MODE_IP_DSCP:
1595 __set_bit(BNX2X_RSS_MODE_IP_DSCP, ¶ms.rss_flags);
1598 BNX2X_ERR("Unknown multi_mode: %d\n", bp->multi_mode);
1602 /* If RSS is enabled */
1603 if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
1604 /* RSS configuration */
1605 __set_bit(BNX2X_RSS_IPV4, ¶ms.rss_flags);
1606 __set_bit(BNX2X_RSS_IPV4_TCP, ¶ms.rss_flags);
1607 __set_bit(BNX2X_RSS_IPV6, ¶ms.rss_flags);
1608 __set_bit(BNX2X_RSS_IPV6_TCP, ¶ms.rss_flags);
1611 params.rss_result_mask = MULTI_MASK;
1613 memcpy(params.ind_table, ind_table, sizeof(params.ind_table));
1617 for (i = 0; i < sizeof(params.rss_key) / 4; i++)
1618 params.rss_key[i] = random32();
1620 __set_bit(BNX2X_RSS_SET_SRCH, ¶ms.rss_flags);
1624 return bnx2x_config_rss(bp, ¶ms);
1627 static inline int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
1629 struct bnx2x_func_state_params func_params = {0};
1631 /* Prepare parameters for function state transitions */
1632 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
1634 func_params.f_obj = &bp->func_obj;
1635 func_params.cmd = BNX2X_F_CMD_HW_INIT;
1637 func_params.params.hw_init.load_phase = load_code;
1639 return bnx2x_func_state_change(bp, &func_params);
1643 * Cleans the objects that have internal lists without sending
1644 * ramrods. Should be run when interrupts are disabled.
1646 static void bnx2x_squeeze_objects(struct bnx2x *bp)
1649 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
1650 struct bnx2x_mcast_ramrod_params rparam = {0};
1651 struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
1653 /***************** Cleanup MACs' object first *************************/
1655 /* Wait for completion of requested */
1656 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
1657 /* Perform a dry cleanup */
1658 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
1660 /* Clean ETH primary MAC */
1661 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
1662 rc = mac_obj->delete_all(bp, &bp->fp->mac_obj, &vlan_mac_flags,
1665 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
1667 /* Cleanup UC list */
1669 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
1670 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
1673 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
1675 /***************** Now clean mcast object *****************************/
1676 rparam.mcast_obj = &bp->mcast_obj;
1677 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
1679 /* Add a DEL command... */
1680 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
1682 BNX2X_ERR("Failed to add a new DEL command to a multi-cast "
1683 "object: %d\n", rc);
1685 /* ...and wait until all pending commands are cleared */
1686 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1689 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
1694 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1698 #ifndef BNX2X_STOP_ON_ERROR
1699 #define LOAD_ERROR_EXIT(bp, label) \
1701 (bp)->state = BNX2X_STATE_ERROR; \
1705 #define LOAD_ERROR_EXIT(bp, label) \
1707 (bp)->state = BNX2X_STATE_ERROR; \
1713 /* must be called with rtnl_lock */
1714 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1716 int port = BP_PORT(bp);
1720 #ifdef BNX2X_STOP_ON_ERROR
1721 if (unlikely(bp->panic))
1725 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
1727 /* Set the initial link reported state to link down */
1728 bnx2x_acquire_phy_lock(bp);
1729 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
1730 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1731 &bp->last_reported_link.link_report_flags);
1732 bnx2x_release_phy_lock(bp);
1734 /* must be called before memory allocation and HW init */
1735 bnx2x_ilt_set_info(bp);
1738 * Zero fastpath structures while preserving invariants that are allocated
1739 * only once: napi, fp index, max_cos and the bp pointer.
1740 * Also set fp->disable_tpa.
1742 for_each_queue(bp, i)
1746 /* Set the receive queues buffer size */
1747 bnx2x_set_rx_buf_size(bp);
1749 if (bnx2x_alloc_mem(bp))
1752 /* As long as bnx2x_alloc_mem() may possibly update
1753 * bp->num_queues, bnx2x_set_real_num_queues() should always
1756 rc = bnx2x_set_real_num_queues(bp);
1758 BNX2X_ERR("Unable to set real_num_queues\n");
1759 LOAD_ERROR_EXIT(bp, load_error0);
1762 /* Configure multi-CoS mappings in the kernel.
1763 * This configuration may be overridden by a multi-class queue discipline
1764 * or by a DCBX negotiation result.
1766 bnx2x_setup_tc(bp->dev, bp->max_cos);
1768 bnx2x_napi_enable(bp);
1770 /* Send the LOAD_REQUEST command to the MCP.
1771 * Returns the type of LOAD command:
1772 * if this is the first port to be initialized,
1773 * common blocks should be initialized, otherwise - not
1775 if (!BP_NOMCP(bp)) {
1776 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
1778 BNX2X_ERR("MCP response failure, aborting\n");
1780 LOAD_ERROR_EXIT(bp, load_error1);
1782 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
1783 rc = -EBUSY; /* other port in diagnostic mode */
1784 LOAD_ERROR_EXIT(bp, load_error1);
1786 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
1787 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
1788 /* build FW version dword */
1789 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
1790 (BCM_5710_FW_MINOR_VERSION << 8) +
1791 (BCM_5710_FW_REVISION_VERSION << 16) +
1792 (BCM_5710_FW_ENGINEERING_VERSION << 24);
1794 /* read loaded FW from chip */
1795 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
1797 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x",
1800 /* abort nic load if version mismatch */
1801 if (my_fw != loaded_fw) {
1802 BNX2X_ERR("bnx2x with FW %x already loaded, "
1803 "which mismatches my %x FW. aborting",
1806 LOAD_ERROR_EXIT(bp, load_error2);
1811 int path = BP_PATH(bp);
1813 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
1814 path, load_count[path][0], load_count[path][1],
1815 load_count[path][2]);
1816 load_count[path][0]++;
1817 load_count[path][1 + port]++;
1818 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
1819 path, load_count[path][0], load_count[path][1],
1820 load_count[path][2]);
1821 if (load_count[path][0] == 1)
1822 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
1823 else if (load_count[path][1 + port] == 1)
1824 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
1826 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
1829 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1830 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
1831 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
1834 * We need the barrier to ensure the ordering between the
1835 * writing to bp->port.pmf here and reading it from the
1836 * bnx2x_periodic_task().
1839 queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
1843 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1845 /* Init Function state controlling object */
1846 bnx2x__init_func_obj(bp);
1849 rc = bnx2x_init_hw(bp, load_code);
1851 BNX2X_ERR("HW init failed, aborting\n");
1852 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1853 LOAD_ERROR_EXIT(bp, load_error2);
1856 /* Connect to IRQs */
1857 rc = bnx2x_setup_irqs(bp);
1859 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1860 LOAD_ERROR_EXIT(bp, load_error2);
1863 /* Setup NIC internals and enable interrupts */
1864 bnx2x_nic_init(bp, load_code);
1866 /* Init per-function objects */
1867 bnx2x_init_bp_objs(bp);
1869 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1870 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
1871 (bp->common.shmem2_base)) {
1872 if (SHMEM2_HAS(bp, dcc_support))
1873 SHMEM2_WR(bp, dcc_support,
1874 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
1875 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
1878 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
1879 rc = bnx2x_func_start(bp);
1881 BNX2X_ERR("Function start failed!\n");
1882 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1883 LOAD_ERROR_EXIT(bp, load_error3);
1886 /* Send LOAD_DONE command to MCP */
1887 if (!BP_NOMCP(bp)) {
1888 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1890 BNX2X_ERR("MCP response failure, aborting\n");
1892 LOAD_ERROR_EXIT(bp, load_error3);
1896 rc = bnx2x_setup_leading(bp);
1898 BNX2X_ERR("Setup leading failed!\n");
1899 LOAD_ERROR_EXIT(bp, load_error3);
1903 /* Enable Timer scan */
1904 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
1907 for_each_nondefault_queue(bp, i) {
1908 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
1910 LOAD_ERROR_EXIT(bp, load_error4);
1913 rc = bnx2x_init_rss_pf(bp);
1915 LOAD_ERROR_EXIT(bp, load_error4);
1917 /* Now that clients are configured we are ready to work */
1918 bp->state = BNX2X_STATE_OPEN;
1920 /* Configure a ucast MAC */
1921 rc = bnx2x_set_eth_mac(bp, true);
1923 LOAD_ERROR_EXIT(bp, load_error4);
1925 if (bp->pending_max) {
1926 bnx2x_update_max_mf_config(bp, bp->pending_max);
1927 bp->pending_max = 0;
1931 bnx2x_initial_phy_init(bp, load_mode);
1933 /* Start fast path */
1935 /* Initialize Rx filter. */
1936 netif_addr_lock_bh(bp->dev);
1937 bnx2x_set_rx_mode(bp->dev);
1938 netif_addr_unlock_bh(bp->dev);
1941 switch (load_mode) {
1943 /* Tx queues should only be re-enabled */
1944 netif_tx_wake_all_queues(bp->dev);
1948 netif_tx_start_all_queues(bp->dev);
1949 smp_mb__after_clear_bit();
1953 bp->state = BNX2X_STATE_DIAG;
1961 bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 0);
1963 bnx2x__link_status_update(bp);
1965 /* start the timer */
1966 mod_timer(&bp->timer, jiffies + bp->current_interval);
1969 /* re-read iscsi info */
1970 bnx2x_get_iscsi_info(bp);
1971 bnx2x_setup_cnic_irq_info(bp);
1972 if (bp->state == BNX2X_STATE_OPEN)
1973 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
1975 bnx2x_inc_load_cnt(bp);
1977 /* Wait for all pending SP commands to complete */
1978 if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
1979 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
1980 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
1984 bnx2x_dcbx_init(bp);
1987 #ifndef BNX2X_STOP_ON_ERROR
1990 /* Disable Timer scan */
1991 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
1994 bnx2x_int_disable_sync(bp, 1);
1996 /* Clean queueable objects */
1997 bnx2x_squeeze_objects(bp);
1999 /* Free SKBs, SGEs, TPA pool and driver internals */
2000 bnx2x_free_skbs(bp);
2001 for_each_rx_queue(bp, i)
2002 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2007 if (!BP_NOMCP(bp)) {
2008 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2009 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2014 bnx2x_napi_disable(bp);
2019 #endif /* ! BNX2X_STOP_ON_ERROR */
2022 /* must be called with rtnl_lock */
2023 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
2026 bool global = false;
2028 if ((bp->state == BNX2X_STATE_CLOSED) ||
2029 (bp->state == BNX2X_STATE_ERROR)) {
2030 /* We can get here if the driver has been unloaded
2031 * during parity error recovery and is either waiting for a
2032 * leader to complete or for other functions to unload and
2033 * then ifdown has been issued. In this case we want to
2034 * unload and let other functions complete a recovery
2037 bp->recovery_state = BNX2X_RECOVERY_DONE;
2039 bnx2x_release_leader_lock(bp);
2042 DP(NETIF_MSG_HW, "Releasing a leadership...\n");
2048 * It's important to set bp->state to a value different from
2049 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2050 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2052 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2056 bnx2x_tx_disable(bp);
2059 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2062 bp->rx_mode = BNX2X_RX_MODE_NONE;
2064 del_timer_sync(&bp->timer);
2066 /* Set ALWAYS_ALIVE bit in shmem */
2067 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2069 bnx2x_drv_pulse(bp);
2071 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2073 /* Cleanup the chip if needed */
2074 if (unload_mode != UNLOAD_RECOVERY)
2075 bnx2x_chip_cleanup(bp, unload_mode);
2077 /* Send the UNLOAD_REQUEST to the MCP */
2078 bnx2x_send_unload_req(bp, unload_mode);
2081 * Prevent transactions to host from the functions on the
2082 * engine that doesn't reset global blocks in case of global
2083 * attention once global blocks are reset and gates are opened
2084 * (the engine which leader will perform the recovery
2087 if (!CHIP_IS_E1x(bp))
2088 bnx2x_pf_disable(bp);
2090 /* Disable HW interrupts, NAPI */
2091 bnx2x_netif_stop(bp, 1);
2096 /* Report UNLOAD_DONE to MCP */
2097 bnx2x_send_unload_done(bp);
2101 * At this stage no more interrupts will arrive so we may safely clean
2102 * the queueable objects here in case they failed to get cleaned so far.
2104 bnx2x_squeeze_objects(bp);
2106 /* There should be no more pending SP commands at this stage */
2111 /* Free SKBs, SGEs, TPA pool and driver internals */
2112 bnx2x_free_skbs(bp);
2113 for_each_rx_queue(bp, i)
2114 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2118 bp->state = BNX2X_STATE_CLOSED;
2120 /* Check if there are pending parity attentions. If there are - set
2121 * RECOVERY_IN_PROGRESS.
2123 if (bnx2x_chk_parity_attn(bp, &global, false)) {
2124 bnx2x_set_reset_in_progress(bp);
2126 /* Set RESET_IS_GLOBAL if needed */
2128 bnx2x_set_reset_global(bp);
2132 /* The last driver must disable a "close the gate" if there is no
2133 * parity attention or "process kill" pending.
2135 if (!bnx2x_dec_load_cnt(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
2136 bnx2x_disable_close_the_gate(bp);
2141 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
2145 /* If there is no power capability, silently succeed */
2147 DP(NETIF_MSG_HW, "No power capability. Breaking.\n");
2151 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2155 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2156 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2157 PCI_PM_CTRL_PME_STATUS));
2159 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2160 /* delay required during transition out of D3hot */
2165 /* If there are other clients above, don't
2166 shut down the power */
2167 if (atomic_read(&bp->pdev->enable_cnt) != 1)
2169 /* Don't shut down the power for emulation and FPGA */
2170 if (CHIP_REV_IS_SLOW(bp))
2173 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2177 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2179 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2182 /* No more memory access after this point until
2183 * device is brought back to D0.
2194 * net_device service functions
2196 int bnx2x_poll(struct napi_struct *napi, int budget)
2200 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
2202 struct bnx2x *bp = fp->bp;
2205 #ifdef BNX2X_STOP_ON_ERROR
2206 if (unlikely(bp->panic)) {
2207 napi_complete(napi);
2212 for_each_cos_in_tx_queue(fp, cos)
2213 if (bnx2x_tx_queue_has_work(&fp->txdata[cos]))
2214 bnx2x_tx_int(bp, &fp->txdata[cos]);
2217 if (bnx2x_has_rx_work(fp)) {
2218 work_done += bnx2x_rx_int(fp, budget - work_done);
2220 /* must not complete if we consumed full budget */
2221 if (work_done >= budget)
2225 /* Fall out from the NAPI loop if needed */
2226 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2228 /* No need to update SB for FCoE L2 ring as long as
2229 * it's connected to the default SB and the SB
2230 * has been updated when NAPI was scheduled.
2232 if (IS_FCOE_FP(fp)) {
2233 napi_complete(napi);
2238 bnx2x_update_fpsb_idx(fp);
2239 /* bnx2x_has_rx_work() reads the status block,
2240 * thus we need to ensure that status block indices
2241 * have been actually read (bnx2x_update_fpsb_idx)
2242 * prior to this check (bnx2x_has_rx_work) so that
2243 * we won't write the "newer" value of the status block
2244 * to IGU (if there was a DMA right after
2245 * bnx2x_has_rx_work and if there is no rmb, the memory
2246 * reading (bnx2x_update_fpsb_idx) may be postponed
2247 * to right before bnx2x_ack_sb). In this case there
2248 * will never be another interrupt until there is
2249 * another update of the status block, while there
2250 * is still unhandled work.
2254 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2255 napi_complete(napi);
2256 /* Re-enable interrupts */
2258 "Update index to %d\n", fp->fp_hc_idx);
2259 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
2260 le16_to_cpu(fp->fp_hc_idx),
2270 /* We split the first BD into header and data BDs
2271 * to ease the pain of our fellow microcode engineers;
2272 * we use one mapping for both BDs.
2273 * So far this has only been observed to happen
2274 * in Other Operating Systems(TM).
2276 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
2277 struct bnx2x_fp_txdata *txdata,
2278 struct sw_tx_bd *tx_buf,
2279 struct eth_tx_start_bd **tx_bd, u16 hlen,
2280 u16 bd_prod, int nbd)
2282 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
2283 struct eth_tx_bd *d_tx_bd;
2285 int old_len = le16_to_cpu(h_tx_bd->nbytes);
2287 /* first fix first BD */
2288 h_tx_bd->nbd = cpu_to_le16(nbd);
2289 h_tx_bd->nbytes = cpu_to_le16(hlen);
2291 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
2292 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
2293 h_tx_bd->addr_lo, h_tx_bd->nbd);
2295 /* now get a new data BD
2296 * (after the pbd) and fill it */
2297 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2298 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
2300 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
2301 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
2303 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2304 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2305 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
2307 /* this marks the BD as one that has no individual mapping */
2308 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
2310 DP(NETIF_MSG_TX_QUEUED,
2311 "TSO split data size is %d (%x:%x)\n",
2312 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
2315 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
2320 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
2323 csum = (u16) ~csum_fold(csum_sub(csum,
2324 csum_partial(t_header - fix, fix, 0)));
2327 csum = (u16) ~csum_fold(csum_add(csum,
2328 csum_partial(t_header, -fix, 0)));
2330 return swab16(csum);
2333 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
2337 if (skb->ip_summed != CHECKSUM_PARTIAL)
2341 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
2343 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2344 rc |= XMIT_CSUM_TCP;
2348 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2349 rc |= XMIT_CSUM_TCP;
2353 if (skb_is_gso_v6(skb))
2354 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
2355 else if (skb_is_gso(skb))
2356 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
2361 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2362 /* Check if the packet requires linearization (packet is too fragmented).
2363 No need to check fragmentation if the page size > 8K (there will be no
2364 violation of FW restrictions). */
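/* Sketch of the check below: with wnd_size = MAX_FETCH_BD - 3, every window
 * of wnd_size consecutive frags (the first window also counting the data in
 * the linear part) must carry at least lso_mss bytes; if any window falls
 * short, the FW windowing restriction could be violated and to_copy is
 * returned non-zero so that the skb gets linearized.
 */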
2365 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
2370 int first_bd_sz = 0;
2372 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
2373 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
2375 if (xmit_type & XMIT_GSO) {
2376 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
2377 /* Check if LSO packet needs to be copied:
2378 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
2379 int wnd_size = MAX_FETCH_BD - 3;
2380 /* Number of windows to check */
2381 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
2386 /* Headers length */
2387 hlen = (int)(skb_transport_header(skb) - skb->data) +
2390 /* Amount of data (w/o headers) on linear part of SKB*/
2391 first_bd_sz = skb_headlen(skb) - hlen;
2393 wnd_sum = first_bd_sz;
2395 /* Calculate the first sum - it's special */
2396 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
2398 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
2400 /* If there was data on linear skb data - check it */
2401 if (first_bd_sz > 0) {
2402 if (unlikely(wnd_sum < lso_mss)) {
2407 wnd_sum -= first_bd_sz;
2410 /* Others are easier: run through the frag list and
2411 check all windows */
2412 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
2414 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
2416 if (unlikely(wnd_sum < lso_mss)) {
2420 wnd_sum -=
2421 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
2424 /* in the non-LSO case a too fragmented packet should always
2425 be linearized */
2431 if (unlikely(to_copy))
2432 DP(NETIF_MSG_TX_QUEUED,
2433 "Linearization IS REQUIRED for %s packet. "
2434 "num_frags %d hlen %d first_bd_sz %d\n",
2435 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
2436 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
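/* Illustrative sketch (not driver code): the LSO check above slides a
 * window of wnd_size consecutive fragments over the frag list and requires
 * every window to carry at least one MSS of data, otherwise the SKB must
 * be linearized (the driver additionally folds the linear part of the SKB
 * into the first window). Minimal stand-alone version over an array of
 * fragment sizes; names are hypothetical.
 */
static int example_needs_linearize(const unsigned int *frag_sz, int nr_frags,
				   int wnd_size, unsigned int mss)
{
	unsigned int wnd_sum = 0;
	int i;

	if (nr_frags < wnd_size)
		return 0;		/* cannot violate the FW limit */

	/* sum of the first window */
	for (i = 0; i < wnd_size; i++)
		wnd_sum += frag_sz[i];
	if (wnd_sum < mss)
		return 1;

	/* slide: add the entering fragment, drop the leaving one */
	for (i = wnd_size; i < nr_frags; i++) {
		wnd_sum += frag_sz[i] - frag_sz[i - wnd_size];
		if (wnd_sum < mss)
			return 1;
	}
	return 0;
}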
2442 static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
2445 *parsing_data |= (skb_shinfo(skb)->gso_size <<
2446 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
2447 ETH_TX_PARSE_BD_E2_LSO_MSS;
2448 if ((xmit_type & XMIT_GSO_V6) &&
2449 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
2450 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
2454 * bnx2x_set_pbd_gso - update PBD in GSO case.
2458 * @xmit_type: xmit flags
2460 static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
2461 struct eth_tx_parse_bd_e1x *pbd,
2464 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2465 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
2466 pbd->tcp_flags = pbd_tcp_flags(skb);
2468 if (xmit_type & XMIT_GSO_V4) {
2469 pbd->ip_id = swab16(ip_hdr(skb)->id);
2470 pbd->tcp_pseudo_csum =
2471 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
2473 0, IPPROTO_TCP, 0));
2476 pbd->tcp_pseudo_csum =
2477 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2478 &ipv6_hdr(skb)->daddr,
2479 0, IPPROTO_TCP, 0));
2481 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
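/* Illustrative sketch (not driver code): for LSO the parse BD is seeded
 * with a pseudo-header checksum computed over the addresses and protocol
 * with a zero length, so the firmware can add the per-segment length
 * later. IPv4 version of that arithmetic, taking host-order inputs; the
 * helper name is hypothetical and the result corresponds to the
 * un-negated folded sum (i.e. ~csum_tcpudp_magic()).
 */
static unsigned short example_pseudo_csum_v4(unsigned int saddr,
					     unsigned int daddr,
					     unsigned char proto,
					     unsigned short len)
{
	unsigned int sum = 0;

	sum += (saddr >> 16) + (saddr & 0xffff);
	sum += (daddr >> 16) + (daddr & 0xffff);
	sum += proto;			/* zero pad byte + protocol */
	sum += len;			/* 0 for the LSO seed above */

	while (sum >> 16)		/* fold carries */
		sum = (sum & 0xffff) + (sum >> 16);
	return (unsigned short)sum;
}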
2485 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
2487 * @bp: driver handle
2489 * @parsing_data: data to be updated
2490 * @xmit_type: xmit flags
2494 static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
2495 u32 *parsing_data, u32 xmit_type)
2498 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
2499 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
2500 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
2502 if (xmit_type & XMIT_CSUM_TCP) {
2503 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
2504 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
2505 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
2507 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
2509 /* We support checksum offload for TCP and UDP only.
2510 * No need to pass the UDP header length - it's a constant.
2512 return skb_transport_header(skb) +
2513 sizeof(struct udphdr) - skb->data;
2516 static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2517 struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
2519 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2521 if (xmit_type & XMIT_CSUM_V4)
2522 tx_start_bd->bd_flags.as_bitfield |=
2523 ETH_TX_BD_FLAGS_IP_CSUM;
2525 tx_start_bd->bd_flags.as_bitfield |=
2526 ETH_TX_BD_FLAGS_IPV6;
2528 if (!(xmit_type & XMIT_CSUM_TCP))
2529 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
2533 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
2535 * @bp: driver handle
2537 * @pbd: parse BD to be updated
2538 * @xmit_type: xmit flags
2540 static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2541 struct eth_tx_parse_bd_e1x *pbd,
2544 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
2546 /* for now NS flag is not used in Linux */
2548 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
2549 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
2551 pbd->ip_hlen_w = (skb_transport_header(skb) -
2552 skb_network_header(skb)) >> 1;
2554 hlen += pbd->ip_hlen_w;
2556 /* We support checksum offload for TCP and UDP only */
2557 if (xmit_type & XMIT_CSUM_TCP)
2558 hlen += tcp_hdrlen(skb) / 2;
2560 hlen += sizeof(struct udphdr) / 2;
2562 pbd->total_hlen_w = cpu_to_le16(hlen);
2565 if (xmit_type & XMIT_CSUM_TCP) {
2566 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
2569 s8 fix = SKB_CS_OFF(skb); /* signed! */
2571 DP(NETIF_MSG_TX_QUEUED,
2572 "hlen %d fix %d csum before fix %x\n",
2573 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
2575 /* HW bug: fixup the CSUM */
2576 pbd->tcp_pseudo_csum =
2577 bnx2x_csum_fix(skb_transport_header(skb),
2580 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
2581 pbd->tcp_pseudo_csum);
2587 /* called with netif_tx_lock
2588 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
2589 * netif_wake_queue()
2591 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2593 struct bnx2x *bp = netdev_priv(dev);
2595 struct bnx2x_fastpath *fp;
2596 struct netdev_queue *txq;
2597 struct bnx2x_fp_txdata *txdata;
2598 struct sw_tx_bd *tx_buf;
2599 struct eth_tx_start_bd *tx_start_bd, *first_bd;
2600 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
2601 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
2602 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
2603 u32 pbd_e2_parsing_data = 0;
2604 u16 pkt_prod, bd_prod;
2605 int nbd, txq_index, fp_index, txdata_index;
2607 u32 xmit_type = bnx2x_xmit_type(bp, skb);
2610 __le16 pkt_size = 0;
2612 u8 mac_type = UNICAST_ADDRESS;
2614 #ifdef BNX2X_STOP_ON_ERROR
2615 if (unlikely(bp->panic))
2616 return NETDEV_TX_BUSY;
2619 txq_index = skb_get_queue_mapping(skb);
2620 txq = netdev_get_tx_queue(dev, txq_index);
2622 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT);
2624 /* decode the fastpath index and the cos index from the txq */
2625 fp_index = TXQ_TO_FP(txq_index);
2626 txdata_index = TXQ_TO_COS(txq_index);
2630 * Override the above for the FCoE queue:
2631 * - FCoE fp entry is right after the ETH entries.
2632 * - FCoE L2 queue uses bp->txdata[0] only.
2634 if (unlikely(!NO_FCOE(bp) && (txq_index ==
2635 bnx2x_fcoe_tx(bp, txq_index)))) {
2636 fp_index = FCOE_IDX;
2641 /* enable this debug print to view the transmission queue being used
2642 DP(BNX2X_MSG_FP, "indices: txq %d, fp %d, txdata %d\n",
2643 txq_index, fp_index, txdata_index); */
2645 /* locate the fastpath and the txdata */
2646 fp = &bp->fp[fp_index];
2647 txdata = &fp->txdata[txdata_index];
2649 /* enable this debug print to view the transmission details
2650 DP(BNX2X_MSG_FP,"transmitting packet cid %d fp index %d txdata_index %d"
2651 " tx_data ptr %p fp pointer %p\n",
2652 txdata->cid, fp_index, txdata_index, txdata, fp); */
2654 if (unlikely(bnx2x_tx_avail(bp, txdata) <
2655 (skb_shinfo(skb)->nr_frags + 3))) {
2656 fp->eth_q_stats.driver_xoff++;
2657 netif_tx_stop_queue(txq);
2658 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2659 return NETDEV_TX_BUSY;
2662 DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x protocol %x "
2663 "protocol(%x,%x) gso type %x xmit_type %x\n",
2664 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
2665 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
2667 eth = (struct ethhdr *)skb->data;
2669 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
2670 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
2671 if (is_broadcast_ether_addr(eth->h_dest))
2672 mac_type = BROADCAST_ADDRESS;
2674 mac_type = MULTICAST_ADDRESS;
2677 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2678 /* First, check if we need to linearize the skb (due to FW
2679 restrictions). No need to check fragmentation if page size > 8K
2680 (there will be no violation of FW restrictions) */
2681 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
2682 /* Statistics of linearization */
2684 if (skb_linearize(skb) != 0) {
2685 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
2686 "silently dropping this SKB\n");
2687 dev_kfree_skb_any(skb);
2688 return NETDEV_TX_OK;
2692 /* Map skb linear data for DMA */
2693 mapping = dma_map_single(&bp->pdev->dev, skb->data,
2694 skb_headlen(skb), DMA_TO_DEVICE);
2695 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2696 DP(NETIF_MSG_TX_QUEUED, "SKB mapping failed - "
2697 "silently dropping this SKB\n");
2698 dev_kfree_skb_any(skb);
2699 return NETDEV_TX_OK;
2702 Please read carefully. First we use one BD which we mark as start,
2703 then we have a parsing info BD (used for TSO or xsum),
2704 and only then we have the rest of the TSO BDs.
2705 (don't forget to mark the last one as last,
2706 and to unmap only AFTER you write to the BD ...)
2707 And above all, all PBD sizes are in words - NOT DWORDS!
2710 /* get current pkt produced now - advance it just before sending packet
2711 * since mapping of pages may fail and cause packet to be dropped
2713 pkt_prod = txdata->tx_pkt_prod;
2714 bd_prod = TX_BD(txdata->tx_bd_prod);
2716 /* get a tx_buf and first BD
2717 * tx_start_bd may be changed during SPLIT,
2718 * but first_bd will always stay first
2720 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
2721 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
2722 first_bd = tx_start_bd;
2724 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
2725 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
2729 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
2731 /* remember the first BD of the packet */
2732 tx_buf->first_bd = txdata->tx_bd_prod;
2736 DP(NETIF_MSG_TX_QUEUED,
2737 "sending pkt %u @%p next_idx %u bd %u @%p\n",
2738 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
2740 if (vlan_tx_tag_present(skb)) {
2741 tx_start_bd->vlan_or_ethertype =
2742 cpu_to_le16(vlan_tx_tag_get(skb));
2743 tx_start_bd->bd_flags.as_bitfield |=
2744 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
2746 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
2748 /* turn on parsing and get a BD */
2749 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2751 if (xmit_type & XMIT_CSUM)
2752 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
2754 if (!CHIP_IS_E1x(bp)) {
2755 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
2756 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
2757 /* Set PBD in checksum offload case */
2758 if (xmit_type & XMIT_CSUM)
2759 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
2760 &pbd_e2_parsing_data,
2764 * fill in the MAC addresses in the PBD - for local switching
2767 bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
2768 &pbd_e2->src_mac_addr_mid,
2769 &pbd_e2->src_mac_addr_lo,
2771 bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
2772 &pbd_e2->dst_mac_addr_mid,
2773 &pbd_e2->dst_mac_addr_lo,
2777 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
2778 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
2779 /* Set PBD in checksum offload case */
2780 if (xmit_type & XMIT_CSUM)
2781 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
2785 /* Setup the data pointer of the first BD of the packet */
2786 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2787 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2788 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
2789 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
2790 pkt_size = tx_start_bd->nbytes;
2792 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
2793 " nbytes %d flags %x vlan %x\n",
2794 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
2795 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
2796 tx_start_bd->bd_flags.as_bitfield,
2797 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
2799 if (xmit_type & XMIT_GSO) {
2801 DP(NETIF_MSG_TX_QUEUED,
2802 "TSO packet len %d hlen %d total len %d tso size %d\n",
2803 skb->len, hlen, skb_headlen(skb),
2804 skb_shinfo(skb)->gso_size);
2806 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
2808 if (unlikely(skb_headlen(skb) > hlen))
2809 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
2812 if (!CHIP_IS_E1x(bp))
2813 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
2816 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
2819 /* Set the PBD's parsing_data field if not zero
2820 * (for the chips newer than 57711).
2822 if (pbd_e2_parsing_data)
2823 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
2825 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
2827 /* Handle fragmented skb */
2828 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2829 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2831 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
2832 skb_frag_size(frag), DMA_TO_DEVICE);
2833 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2834 unsigned int pkts_compl = 0, bytes_compl = 0;
2836 DP(NETIF_MSG_TX_QUEUED, "Unable to map page - "
2837 "dropping packet...\n");
2839 /* we need to unmap all buffers already mapped
2841 * for this SKB; first_bd->nbd needs to be properly updated
2842 * before the call to bnx2x_free_tx_pkt
2844 first_bd->nbd = cpu_to_le16(nbd);
2845 bnx2x_free_tx_pkt(bp, txdata,
2846 TX_BD(txdata->tx_pkt_prod),
2847 &pkts_compl, &bytes_compl);
2848 return NETDEV_TX_OK;
2851 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2852 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
2853 if (total_pkt_bd == NULL)
2854 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
2856 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2857 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2858 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
2859 le16_add_cpu(&pkt_size, skb_frag_size(frag));
2862 DP(NETIF_MSG_TX_QUEUED,
2863 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
2864 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
2865 le16_to_cpu(tx_data_bd->nbytes));
2868 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
2870 /* update with actual num BDs */
2871 first_bd->nbd = cpu_to_le16(nbd);
2873 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2875 /* now send a tx doorbell, counting the next BD
2876 * if the packet contains or ends with it
2878 if (TX_BD_POFF(bd_prod) < nbd)
2879 nbd++;
2881 /* total_pkt_bytes should be set on the first data BD if
2882 * it's not an LSO packet and there is more than one
2883 * data BD. In this case pkt_size is limited by an MTU value.
2884 * However we prefer to set it for an LSO packet (while we don't
2885 * have to) in order to save some CPU cycles in the non-LSO
2886 * case, where we care much more about them.
2888 if (total_pkt_bd != NULL)
2889 total_pkt_bd->total_pkt_bytes = pkt_size;
2892 DP(NETIF_MSG_TX_QUEUED,
2893 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
2894 " tcp_flags %x xsum %x seq %u hlen %u\n",
2895 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
2896 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
2897 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
2898 le16_to_cpu(pbd_e1x->total_hlen_w));
2900 DP(NETIF_MSG_TX_QUEUED,
2901 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
2902 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
2903 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
2904 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
2905 pbd_e2->parsing_data);
2906 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
2908 netdev_tx_sent_queue(txq, skb->len);
2910 txdata->tx_pkt_prod++;
2912 * Make sure that the BD data is updated before updating the producer
2913 * since FW might read the BD right after the producer is updated.
2914 * This is only applicable for weak-ordered memory model archs such
2915 * as IA-64. The following barrier is also mandatory since the FW
2916 * assumes packets must have BDs.
2920 txdata->tx_db.data.prod += nbd;
2923 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
2927 txdata->tx_bd_prod += nbd;
2929 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 3)) {
2930 netif_tx_stop_queue(txq);
2932 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
2933 * ordering of set_bit() in netif_tx_stop_queue() and read of
2937 fp->eth_q_stats.driver_xoff++;
2938 if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3)
2939 netif_tx_wake_queue(txq);
2943 return NETDEV_TX_OK;
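/* Illustrative sketch (not driver code): a worst-case packet needs roughly
 * one start BD for the linear data, one parsing BD, one BD per page
 * fragment and possibly one extra BD when the TSO header is split out -
 * which is why the queue is checked for nr_frags + 3 free descriptors on
 * entry and stopped below MAX_SKB_FRAGS + 3 on exit. Hypothetical helper
 * expressing that bound:
 */
static unsigned int example_max_bds_per_pkt(unsigned int nr_frags,
					    int tso_hdr_split)
{
	/* start BD + parsing BD + fragment BDs (+ split data BD) */
	return 2 + nr_frags + (tso_hdr_split ? 1 : 0);
}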
2947 * bnx2x_setup_tc - routine to configure net_device for multi tc
2949 * @dev: net device to configure
2950 * @num_tc: number of traffic classes to enable
2952 * callback connected to the ndo_setup_tc function pointer
2954 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
2956 int cos, prio, count, offset;
2957 struct bnx2x *bp = netdev_priv(dev);
2959 /* setup tc must be called under rtnl lock */
2962 /* no traffic classes requested. aborting */
2964 netdev_reset_tc(dev);
2968 /* requested to support too many traffic classes */
2969 if (num_tc > bp->max_cos) {
2970 DP(NETIF_MSG_TX_ERR, "support for too many traffic classes"
2971 " requested: %d. max supported is %d\n",
2972 num_tc, bp->max_cos);
2976 /* declare amount of supported traffic classes */
2977 if (netdev_set_num_tc(dev, num_tc)) {
2978 DP(NETIF_MSG_TX_ERR, "failed to declare %d traffic classes\n",
2983 /* configure priority to traffic class mapping */
2984 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
2985 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
2986 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n",
2987 prio, bp->prio_to_cos[prio]);
2991 /* Use this configuration to differentiate tc0 from other COSes.
2992 This can be used for ETS or PFC, and saves the effort of setting
2993 up a multi-class queue disc or negotiating DCBX with a switch
2994 netdev_set_prio_tc_map(dev, 0, 0);
2995 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
2996 for (prio = 1; prio < 16; prio++) {
2997 netdev_set_prio_tc_map(dev, prio, 1);
2998 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
3001 /* configure traffic class to transmission queue mapping */
3002 for (cos = 0; cos < bp->max_cos; cos++) {
3003 count = BNX2X_NUM_ETH_QUEUES(bp);
3004 offset = cos * MAX_TXQS_PER_COS;
3005 netdev_set_tc_queue(dev, cos, count, offset);
3006 DP(BNX2X_MSG_SP, "mapping tc %d to offset %d count %d\n",
3007 cos, offset, count);
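/* Illustrative sketch (not driver code): with the mapping programmed
 * above, each traffic class owns a contiguous block of MAX_TXQS_PER_COS
 * transmission queues starting at cos * MAX_TXQS_PER_COS, and the stack
 * picks a queue inside that block. Hypothetical helper showing the
 * resulting priority -> txq calculation:
 */
static int example_prio_to_txq(const unsigned char *prio_to_cos, int prio,
			       int rss_queue, int num_eth_queues,
			       int txqs_per_cos)
{
	int tc = prio_to_cos[prio];		/* priority -> traffic class */
	int offset = tc * txqs_per_cos;		/* first queue of that class */

	return offset + (rss_queue % num_eth_queues);
}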
3013 /* called with rtnl_lock */
3014 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
3016 struct sockaddr *addr = p;
3017 struct bnx2x *bp = netdev_priv(dev);
3020 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data))
3024 if (IS_MF_ISCSI_SD(bp) && !is_zero_ether_addr(addr->sa_data))
3028 if (netif_running(dev)) {
3029 rc = bnx2x_set_eth_mac(bp, false);
3034 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3036 if (netif_running(dev))
3037 rc = bnx2x_set_eth_mac(bp, true);
3042 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
3044 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
3045 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
3050 if (IS_FCOE_IDX(fp_index)) {
3051 memset(sb, 0, sizeof(union host_hc_status_block));
3052 fp->status_blk_mapping = 0;
3057 if (!CHIP_IS_E1x(bp))
3058 BNX2X_PCI_FREE(sb->e2_sb,
3059 bnx2x_fp(bp, fp_index,
3060 status_blk_mapping),
3061 sizeof(struct host_hc_status_block_e2));
3063 BNX2X_PCI_FREE(sb->e1x_sb,
3064 bnx2x_fp(bp, fp_index,
3065 status_blk_mapping),
3066 sizeof(struct host_hc_status_block_e1x));
3071 if (!skip_rx_queue(bp, fp_index)) {
3072 bnx2x_free_rx_bds(fp);
3074 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3075 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
3076 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
3077 bnx2x_fp(bp, fp_index, rx_desc_mapping),
3078 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3080 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
3081 bnx2x_fp(bp, fp_index, rx_comp_mapping),
3082 sizeof(struct eth_fast_path_rx_cqe) *
3086 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
3087 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
3088 bnx2x_fp(bp, fp_index, rx_sge_mapping),
3089 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3093 if (!skip_tx_queue(bp, fp_index)) {
3094 /* fastpath tx rings: tx_buf tx_desc */
3095 for_each_cos_in_tx_queue(fp, cos) {
3096 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
3099 "freeing tx memory of fp %d cos %d cid %d\n",
3100 fp_index, cos, txdata->cid);
3102 BNX2X_FREE(txdata->tx_buf_ring);
3103 BNX2X_PCI_FREE(txdata->tx_desc_ring,
3104 txdata->tx_desc_mapping,
3105 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3108 /* end of fastpath */
3111 void bnx2x_free_fp_mem(struct bnx2x *bp)
3114 for_each_queue(bp, i)
3115 bnx2x_free_fp_mem_at(bp, i);
3118 static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
3120 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
3121 if (!CHIP_IS_E1x(bp)) {
3122 bnx2x_fp(bp, index, sb_index_values) =
3123 (__le16 *)status_blk.e2_sb->sb.index_values;
3124 bnx2x_fp(bp, index, sb_running_index) =
3125 (__le16 *)status_blk.e2_sb->sb.running_index;
3127 bnx2x_fp(bp, index, sb_index_values) =
3128 (__le16 *)status_blk.e1x_sb->sb.index_values;
3129 bnx2x_fp(bp, index, sb_running_index) =
3130 (__le16 *)status_blk.e1x_sb->sb.running_index;
3134 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3136 union host_hc_status_block *sb;
3137 struct bnx2x_fastpath *fp = &bp->fp[index];
3140 int rx_ring_size = 0;
3143 if (!bp->rx_ring_size && IS_MF_ISCSI_SD(bp)) {
3144 rx_ring_size = MIN_RX_SIZE_NONTPA;
3145 bp->rx_ring_size = rx_ring_size;
3148 if (!bp->rx_ring_size) {
3150 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
3152 /* allocate at least number of buffers required by FW */
3153 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
3154 MIN_RX_SIZE_TPA, rx_ring_size);
3156 bp->rx_ring_size = rx_ring_size;
3157 } else /* if rx_ring_size specified - use it */
3158 rx_ring_size = bp->rx_ring_size;
3161 sb = &bnx2x_fp(bp, index, status_blk);
3163 if (!IS_FCOE_IDX(index)) {
3166 if (!CHIP_IS_E1x(bp))
3167 BNX2X_PCI_ALLOC(sb->e2_sb,
3168 &bnx2x_fp(bp, index, status_blk_mapping),
3169 sizeof(struct host_hc_status_block_e2));
3171 BNX2X_PCI_ALLOC(sb->e1x_sb,
3172 &bnx2x_fp(bp, index, status_blk_mapping),
3173 sizeof(struct host_hc_status_block_e1x));
3178 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
3179 * set shortcuts for it.
3181 if (!IS_FCOE_IDX(index))
3182 set_sb_shortcuts(bp, index);
3185 if (!skip_tx_queue(bp, index)) {
3186 /* fastpath tx rings: tx_buf tx_desc */
3187 for_each_cos_in_tx_queue(fp, cos) {
3188 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
3190 DP(BNX2X_MSG_SP, "allocating tx memory of "
3194 BNX2X_ALLOC(txdata->tx_buf_ring,
3195 sizeof(struct sw_tx_bd) * NUM_TX_BD);
3196 BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
3197 &txdata->tx_desc_mapping,
3198 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3203 if (!skip_rx_queue(bp, index)) {
3204 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3205 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
3206 sizeof(struct sw_rx_bd) * NUM_RX_BD);
3207 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
3208 &bnx2x_fp(bp, index, rx_desc_mapping),
3209 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3211 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
3212 &bnx2x_fp(bp, index, rx_comp_mapping),
3213 sizeof(struct eth_fast_path_rx_cqe) *
3217 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
3218 sizeof(struct sw_rx_page) * NUM_RX_SGE);
3219 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
3220 &bnx2x_fp(bp, index, rx_sge_mapping),
3221 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3223 bnx2x_set_next_page_rx_bd(fp);
3226 bnx2x_set_next_page_rx_cq(fp);
3229 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
3230 if (ring_size < rx_ring_size)
3236 /* handles low memory cases */
3238 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
3240 /* FW will drop all packets if the queue is not big enough;
3241 * in these cases we disable the queue.
3242 * Min size is different for OOO, TPA and non-TPA queues
3244 if (ring_size < (fp->disable_tpa ?
3245 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
3246 /* release memory allocated for this queue */
3247 bnx2x_free_fp_mem_at(bp, index);
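/* Illustrative sketch (not driver code): when no Rx ring size is
 * configured, each RSS queue gets an equal share of MAX_RX_AVAIL, clamped
 * from below by the FW minimum (which differs for TPA and non-TPA
 * queues). Hypothetical helper with the same shape:
 */
static int example_rx_ring_size(int requested, int max_rx_avail,
				int num_rx_queues, int fw_min)
{
	int size;

	if (requested)			/* explicitly configured size wins */
		return requested;

	size = max_rx_avail / num_rx_queues;
	return size > fw_min ? size : fw_min;
}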
3253 int bnx2x_alloc_fp_mem(struct bnx2x *bp)
3258 * 1. Allocate FP for leading - fatal if error
3259 * 2. {CNIC} Allocate FCoE FP - fatal if error
3260 * 3. {CNIC} Allocate OOO + FWD - disable OOO if error
3261 * 4. Allocate RSS - fix number of queues if error
3265 if (bnx2x_alloc_fp_mem_at(bp, 0))
3271 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
3272 /* we will fail the load process instead of marking NO_FCOE_FLAG */
3279 for_each_nondefault_eth_queue(bp, i)
3280 if (bnx2x_alloc_fp_mem_at(bp, i))
3283 /* handle memory failures */
3284 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
3285 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
3290 * move non eth FPs next to last eth FP
3291 * must be done in that order
3292 * FCOE_IDX < FWD_IDX < OOO_IDX
3295 /* move FCoE fp even if NO_FCOE_FLAG is on
3296 bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);
3298 bp->num_queues -= delta;
3299 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
3300 bp->num_queues + delta, bp->num_queues);
3306 void bnx2x_free_mem_bp(struct bnx2x *bp)
3309 kfree(bp->msix_table);
3313 int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
3315 struct bnx2x_fastpath *fp;
3316 struct msix_entry *tbl;
3317 struct bnx2x_ilt *ilt;
3318 int msix_table_size = 0;
3321 * The biggest MSI-X table we might need is the maximum number of fast
3322 * path IGU SBs plus the default SB (for the PF).
3324 msix_table_size = bp->igu_sb_cnt + 1;
3326 /* fp array: RSS plus CNIC related L2 queues */
3327 fp = kcalloc(BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE,
3328 sizeof(*fp), GFP_KERNEL);
3334 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
3337 bp->msix_table = tbl;
3340 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
3347 bnx2x_free_mem_bp(bp);
3352 int bnx2x_reload_if_running(struct net_device *dev)
3354 struct bnx2x *bp = netdev_priv(dev);
3356 if (unlikely(!netif_running(dev)))
3359 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
3360 return bnx2x_nic_load(bp, LOAD_NORMAL);
3363 int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
3365 u32 sel_phy_idx = 0;
3366 if (bp->link_params.num_phys <= 1)
3369 if (bp->link_vars.link_up) {
3370 sel_phy_idx = EXT_PHY1;
3371 /* In case link is SERDES, check if the EXT_PHY2 is the one */
3372 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
3373 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
3374 sel_phy_idx = EXT_PHY2;
3377 switch (bnx2x_phy_selection(&bp->link_params)) {
3378 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
3379 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
3380 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
3381 sel_phy_idx = EXT_PHY1;
3383 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
3384 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
3385 sel_phy_idx = EXT_PHY2;
3393 int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
3395 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
3397 * The selected active PHY is always after swapping (in case PHY
3398 * swapping is enabled), so when swapping is enabled we need to reverse the selection.
3402 if (bp->link_params.multi_phy_config &
3403 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
3404 if (sel_phy_idx == EXT_PHY1)
3405 sel_phy_idx = EXT_PHY2;
3406 else if (sel_phy_idx == EXT_PHY2)
3407 sel_phy_idx = EXT_PHY1;
3409 return LINK_CONFIG_IDX(sel_phy_idx);
3412 #if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
3413 int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
3415 struct bnx2x *bp = netdev_priv(dev);
3416 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
3419 case NETDEV_FCOE_WWNN:
3420 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
3421 cp->fcoe_wwn_node_name_lo);
3423 case NETDEV_FCOE_WWPN:
3424 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
3425 cp->fcoe_wwn_port_name_lo);
3435 /* called with rtnl_lock */
3436 int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
3438 struct bnx2x *bp = netdev_priv(dev);
3440 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
3441 pr_err("Handling parity error recovery. Try again later\n");
3445 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
3446 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
3449 /* This does not race with packet allocation
3450 * because the actual alloc size is
3451 * only updated as part of load
3455 return bnx2x_reload_if_running(dev);
3458 netdev_features_t bnx2x_fix_features(struct net_device *dev,
3459 netdev_features_t features)
3461 struct bnx2x *bp = netdev_priv(dev);
3463 /* TPA requires Rx CSUM offloading */
3464 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa)
3465 features &= ~NETIF_F_LRO;
3470 int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
3472 struct bnx2x *bp = netdev_priv(dev);
3473 u32 flags = bp->flags;
3474 bool bnx2x_reload = false;
3476 if (features & NETIF_F_LRO)
3477 flags |= TPA_ENABLE_FLAG;
3479 flags &= ~TPA_ENABLE_FLAG;
3481 if (features & NETIF_F_LOOPBACK) {
3482 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
3483 bp->link_params.loopback_mode = LOOPBACK_BMAC;
3484 bnx2x_reload = true;
3487 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
3488 bp->link_params.loopback_mode = LOOPBACK_NONE;
3489 bnx2x_reload = true;
3493 if (flags ^ bp->flags) {
3495 bnx2x_reload = true;
3499 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
3500 return bnx2x_reload_if_running(dev);
3501 /* else: bnx2x_nic_load() will be called at end of recovery */
3507 void bnx2x_tx_timeout(struct net_device *dev)
3509 struct bnx2x *bp = netdev_priv(dev);
3511 #ifdef BNX2X_STOP_ON_ERROR
3516 smp_mb__before_clear_bit();
3517 set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
3518 smp_mb__after_clear_bit();
3520 /* This allows the netif to be shut down gracefully before resetting */
3521 schedule_delayed_work(&bp->sp_rtnl_task, 0);
3524 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
3526 struct net_device *dev = pci_get_drvdata(pdev);
3530 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3533 bp = netdev_priv(dev);
3537 pci_save_state(pdev);
3539 if (!netif_running(dev)) {
3544 netif_device_detach(dev);
3546 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
3548 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
3555 int bnx2x_resume(struct pci_dev *pdev)
3557 struct net_device *dev = pci_get_drvdata(pdev);
3562 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3565 bp = netdev_priv(dev);
3567 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
3568 pr_err("Handling parity error recovery. Try again later\n");
3574 pci_restore_state(pdev);
3576 if (!netif_running(dev)) {
3581 bnx2x_set_power_state(bp, PCI_D0);
3582 netif_device_attach(dev);
3584 /* Since the chip was reset, clear the FW sequence number */
3586 rc = bnx2x_nic_load(bp, LOAD_OPEN);
3594 void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
3597 /* ustorm cxt validation */
3598 cxt->ustorm_ag_context.cdu_usage =
3599 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
3600 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
3601 /* xcontext validation */
3602 cxt->xstorm_ag_context.cdu_reserved =
3603 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
3604 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
3607 static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
3608 u8 fw_sb_id, u8 sb_index,
3612 u32 addr = BAR_CSTRORM_INTMEM +
3613 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
3614 REG_WR8(bp, addr, ticks);
3615 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
3616 port, fw_sb_id, sb_index, ticks);
3619 static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
3620 u16 fw_sb_id, u8 sb_index,
3623 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
3624 u32 addr = BAR_CSTRORM_INTMEM +
3625 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
3626 u16 flags = REG_RD16(bp, addr);
3628 flags &= ~HC_INDEX_DATA_HC_ENABLED;
3629 flags |= enable_flag;
3630 REG_WR16(bp, addr, flags);
3631 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
3632 port, fw_sb_id, sb_index, disable);
3635 void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
3636 u8 sb_index, u8 disable, u16 usec)
3638 int port = BP_PORT(bp);
3639 u8 ticks = usec / BNX2X_BTR;
3641 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
3643 disable = disable ? 1 : (usec ? 0 : 1);
3644 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
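/* Illustrative sketch (not driver code): the coalescing timeout above is
 * programmed in ticks of BNX2X_BTR microseconds, and the status block
 * index is disabled either on explicit request or when 0 usec is passed.
 * Hypothetical helper mirroring that conversion:
 */
static void example_coalesce_params(unsigned int usec, int disable_req,
				    unsigned int btr_usec,
				    unsigned char *ticks, int *disable)
{
	*ticks = usec / btr_usec;	/* HW works in BTR-sized ticks */
	*disable = disable_req ? 1 : (usec ? 0 : 1);
}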