/* bnx2x_cmn.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2011 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/firmware.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"
#include "bnx2x_sp.h"

/**
 * bnx2x_bz_fp - zero content of the fastpath structure.
 *
 * @bp:		driver handle
 * @index:	fastpath index to be zeroed
 *
 * Makes sure the contents of the bp->fp[index].napi is kept
 * intact.
 */
static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	struct napi_struct orig_napi = fp->napi;
	/* bzero bnx2x_fastpath contents */
	memset(fp, 0, sizeof(*fp));

	/* Restore the NAPI object as it has been already initialized */
	fp->napi = orig_napi;

	fp->bp = bp;
	fp->index = index;
	if (IS_ETH_FP(fp))
		fp->max_cos = bp->max_cos;
	else
		/* Special queues support only one CoS */
		fp->max_cos = 1;

	/*
	 * set the tpa flag for each queue. The tpa flag determines the queue
	 * minimal size so it must be set prior to queue memory allocation
	 */
	fp->disable_tpa = ((bp->flags & TPA_ENABLE_FLAG) == 0);

#ifdef BCM_CNIC
	/* We don't want TPA on an FCoE L2 ring */
	if (IS_FCOE_FP(fp))
		fp->disable_tpa = 1;
#endif
}

/**
 * bnx2x_move_fp - move content of the fastpath structure.
 *
 * @bp:		driver handle
 * @from:	source FP index
 * @to:		destination FP index
 *
 * Makes sure the contents of the bp->fp[to].napi is kept
 * intact. This is done by first copying the napi struct from
 * the target to the source, and then mem copying the entire
 * source onto the target.
 */
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
	struct bnx2x_fastpath *from_fp = &bp->fp[from];
	struct bnx2x_fastpath *to_fp = &bp->fp[to];

	/* Copy the NAPI object as it has been already initialized */
	from_fp->napi = to_fp->napi;

	/* Move bnx2x_fastpath contents */
	memcpy(to_fp, from_fp, sizeof(*to_fp));
	to_fp->index = to;
}

int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
			     u16 idx, unsigned int *pkts_compl,
			     unsigned int *bytes_compl)
{
	struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(BNX2X_MSG_FP, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
	   txdata->txq_index, idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	if (likely(skb)) {
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
	}

	dev_kfree_skb_any(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

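/* Note on the BD layout freed above: a transmitted packet occupies one
 * start BD, one parse BD, an optional TSO split-header BD (flagged with
 * BNX2X_TSO_SPLIT_BD) and then one data BD per fragment.  Only the start
 * BD and the frag BDs carry DMA mappings, which is why the parse BD and
 * the split-header BD are skipped without an unmap.
 */
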
int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
{
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
	unsigned int pkts_compl = 0, bytes_compl = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
	sw_cons = txdata->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u sw_cons %u "
				      "pkt_cons %u\n",
		   txdata->txq_index, hw_cons, sw_cons, pkt_cons);

		bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
					    &pkts_compl, &bytes_compl);

		sw_cons++;
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	txdata->tx_pkt_cons = sw_cons;
	txdata->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent reenabling the queue
		 * while it's empty. This could have happened if rx_action() gets
		 * suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		BIT_VEC64_CLEAR_BIT(fp->sge_mask,
			RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp,
		le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
		delta += BIT_VEC64_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

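/* The SGE mask above acts as a bit vector of 64-bit elements: the CQE
 * marks its pages consumed by clearing bits, and the producer is advanced
 * only across elements that are fully consumed (all 64 bits clear), one
 * BIT_VEC64_ELEM_SZ step at a time.
 */
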
/* Set Toeplitz hash value in the skb using the value from the
 * CQE (calculated by HW).
 */
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
			    const struct eth_fast_path_rx_cqe *cqe)
{
	/* Set Toeplitz hash from CQE */
	if ((bp->dev->features & NETIF_F_RXHASH) &&
	    (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
		return le32_to_cpu(cqe->rss_hash_result);
	return 0;
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    u16 cons, u16 prod,
			    struct eth_fast_path_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;
	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
	struct sw_rx_bd *first_buf = &tpa_info->first_buf;

	/* print error if current state != stop */
	if (tpa_info->tpa_state != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	/* Try to map an empty data buffer from the aggregation info */
	mapping = dma_map_single(&bp->pdev->dev,
				 first_buf->data + NET_SKB_PAD,
				 fp->rx_buf_size, DMA_FROM_DEVICE);
	/*
	 * ...if it fails - move the skb from the consumer to the producer
	 * and set the current aggregation state as ERROR to drop it
	 * when TPA_STOP arrives.
	 */
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		/* Move the BD from the consumer to the producer */
		bnx2x_reuse_rx_data(fp, cons, prod);
		tpa_info->tpa_state = BNX2X_TPA_ERROR;
		return;
	}

	/* move empty data from pool to prod */
	prod_rx_buf->data = first_buf->data;
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
	/* point prod_bd to new data */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	/* move partial skb from cons to pool (don't unmap yet) */
	*first_buf = *cons_rx_buf;

	/* mark bin state as START */
	tpa_info->parsing_flags = le16_to_cpu(cqe->pars_flags.flags);
	tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
	tpa_info->tpa_state = BNX2X_TPA_START;
	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
	tpa_info->placement_offset = cqe->placement_offset;
	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe);

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef _ASM_GENERIC_INT_L64_H
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

/* Timestamp option length allowed for TPA aggregation:
 *
 *		nop nop kind length echo val
 */
#define TPA_TSTAMP_OPT_LEN	12
/**
 * bnx2x_set_lro_mss - calculate the approximate value of the MSS
 *
 * @bp:			driver handle
 * @parsing_flags:	parsing flags from the START CQE
 * @len_on_bd:		total length of the first packet for the
 *			aggregation.
 *
 * Approximate value of the MSS for this aggregation calculated using
 * the first packet of it.
 */
static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
				    u16 len_on_bd)
{
	/*
	 * TPA aggregation won't have either IP options or TCP options
	 * other than timestamp or IPv6 extension headers.
	 */
	u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);

	if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
	    PRS_FLAG_OVERETH_IPV6)
		hdrs_len += sizeof(struct ipv6hdr);
	else /* IPv4 */
		hdrs_len += sizeof(struct iphdr);

	/* Check if there was a TCP timestamp; if there is one it will
	 * always be 12 bytes long: nop nop kind length echo val.
	 *
	 * Otherwise FW would close the aggregation.
	 */
	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
		hdrs_len += TPA_TSTAMP_OPT_LEN;

	return len_on_bd - hdrs_len;
}

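/* Worked example (assuming 20-byte IPv4 and TCP headers, i.e. no options
 * beyond the timestamp): for an IPv4 aggregation whose first packet
 * carried a TCP timestamp, hdrs_len = ETH_HLEN (14) +
 * sizeof(struct iphdr) (20) + sizeof(struct tcphdr) (20) +
 * TPA_TSTAMP_OPT_LEN (12) = 66, so the reported MSS is len_on_bd - 66.
 */
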
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       u16 queue, struct sk_buff *skb,
			       struct eth_end_agg_rx_cqe *cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;
	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
	u16 len_on_bd = tpa_info->len_on_bd;

	frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
					tpa_info->parsing_flags, len_on_bd);

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, struct eth_end_agg_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
	struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
	u32 pad = tpa_info->placement_offset;
	u16 len = tpa_info->len_on_bd;
	struct sk_buff *skb = NULL;
	u8 *data = rx_buf->data;
	u8 *new_data;
	u8 old_tpa_state = tpa_info->tpa_state;

	tpa_info->tpa_state = BNX2X_TPA_STOP;

	/* If there was an error during the handling of the TPA_START -
	 * drop this aggregation.
	 */
	if (old_tpa_state == BNX2X_TPA_ERROR)
		goto drop;

	/* Try to allocate the new data */
	new_data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 fp->rx_buf_size, DMA_FROM_DEVICE);
	if (likely(new_data))
		skb = build_skb(data);

	if (likely(skb)) {

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > fp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, fp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad + NET_SKB_PAD);
		skb_put(skb, len);
		skb->rxhash = tpa_info->rxhash;

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (!bnx2x_fill_frag_skb(bp, fp, queue, skb, cqe, cqe_idx)) {
			if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
				__vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
			napi_gro_receive(&fp->napi, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb_any(skb);
		}

		/* put new data in bin */
		rx_buf->data = new_data;

		return;
	}
	kfree(new_data);
drop:
	/* drop the packet and keep the buffer in the bin */
	DP(NETIF_MSG_RX_STATUS,
	   "Failed to allocate or map a new skb - dropping packet!\n");
	fp->eth_q_stats.rx_skb_alloc_failed++;
}

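/* TPA aggregation life cycle as implemented above: TPA_START parks the
 * first buffer in tpa_info->first_buf, TPA_STOP builds an skb around it,
 * attaches the SGE pages via bnx2x_fill_frag_skb() and refills the bin
 * with new_data; a mapping failure in between degrades the bin to
 * BNX2X_TPA_ERROR so that the STOP path drops the whole aggregation.
 */
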
int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		struct eth_fast_path_rx_cqe *cqe_fp;
		u8 cqe_fp_flags;
		enum eth_rx_cqe_type cqe_fp_type;
		u16 len, pad;
		u8 *data;

#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic))
			return 0;
#endif

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp = &cqe->fast_path_cqe;
		cqe_fp_flags = cqe_fp->type_error_flags;
		cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe_fp->status_flags,
		   le32_to_cpu(cqe_fp->rss_hash_result),
		   le16_to_cpu(cqe_fp->vlan_tag), le16_to_cpu(cqe_fp->pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;
		}
		rx_buf = &fp->rx_buf_ring[bd_cons];
		data = rx_buf->data;

		if (!CQE_TYPE_FAST(cqe_fp_type)) {
#ifdef BNX2X_STOP_ON_ERROR
			/* sanity check */
			if (fp->disable_tpa &&
			    (CQE_TYPE_START(cqe_fp_type) ||
			     CQE_TYPE_STOP(cqe_fp_type)))
				BNX2X_ERR("START/STOP packet while "
					  "disable_tpa type %x\n",
					  CQE_TYPE(cqe_fp_type));
#endif

			if (CQE_TYPE_START(cqe_fp_type)) {
				u16 queue = cqe_fp->queue_index;
				DP(NETIF_MSG_RX_STATUS,
				   "calling tpa_start on queue %d\n",
				   queue);

				bnx2x_tpa_start(fp, queue,
						bd_cons, bd_prod,
						cqe_fp);
				goto next_rx;
			} else {
				u16 queue =
					cqe->end_agg_cqe.queue_index;
				DP(NETIF_MSG_RX_STATUS,
				   "calling tpa_stop on queue %d\n",
				   queue);

				bnx2x_tpa_stop(bp, fp, queue,
					       &cqe->end_agg_cqe,
					       comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
				if (bp->panic)
					return 0;
#endif

				bnx2x_update_sge_prod(fp, cqe_fp);
				goto next_cqe;
			}
		}
		/* non TPA */
		len = le16_to_cpu(cqe_fp->pkt_len);
		pad = cqe_fp->placement_offset;
		dma_sync_single_for_cpu(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
					pad + RX_COPY_THRESH,
					DMA_FROM_DEVICE);
		prefetch(data + pad); /* speedup eth_type_trans() */
		/* is this an error packet? */
		if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
			DP(NETIF_MSG_RX_ERR,
			   "ERROR flags %x rx packet %u\n",
			   cqe_fp_flags, sw_comp_cons);
			fp->eth_q_stats.rx_err_discard_pkt++;
			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring
		 * copy small packets if mtu > 1500
		 */
		if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
		    (len <= RX_COPY_THRESH)) {
			skb = netdev_alloc_skb_ip_align(bp->dev, len);
			if (skb == NULL) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
				goto reuse_rx;
			}
			memcpy(skb->data, data + pad, len);
			bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
		} else {
			if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
				dma_unmap_single(&bp->pdev->dev,
						 dma_unmap_addr(rx_buf, mapping),
						 fp->rx_buf_size,
						 DMA_FROM_DEVICE);
				skb = build_skb(data);
				if (unlikely(!skb)) {
					kfree(data);
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto next_rx;
				}
				skb_reserve(skb, pad);
			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
				goto next_rx;
			}
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Set Toeplitz hash for a non-LRO skb */
		skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp);

		skb_checksum_none_assert(skb);

		if (bp->dev->features & NETIF_F_RXCSUM) {

			if (likely(BNX2X_RX_CSUM_OK(cqe)))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			else
				fp->eth_q_stats.hw_csum_err++;
		}

		skb_record_rx_queue(skb, fp->rx_queue);

		if (le16_to_cpu(cqe_fp->pars_flags.flags) &
		    PARSING_FLAGS_VLAN)
			__vlan_hwaccel_put_tag(skb,
					       le16_to_cpu(cqe_fp->vlan_tag));
		napi_gro_receive(&fp->napi, skb);

next_rx:
		rx_buf->data = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

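/* Note that the completion queue and the Rx BD ring advance at different
 * rates above: slowpath events and TPA stop completions advance only the
 * CQ indices (next_cqe), while regular packets and TPA start completions
 * also consume an Rx BD (next_rx).
 */
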
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	u8 cos;

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
			 "[fp %d fw_sd %d igusb %d]\n",
	   fp->index, fp->fw_sb_id, fp->igu_sb_id);
	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	prefetch(fp->rx_cons_sb);

	for_each_cos_in_tx_queue(fp, cos)
		prefetch(fp->txdata[cos].tx_cons_sb);

	prefetch(&fp->sb_running_index[SM_RX_ID]);
	napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}

/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

/* calculates MF speed according to current linespeed and MF configuration */
u16 bnx2x_get_mf_speed(struct bnx2x *bp)
{
	u16 line_speed = bp->link_vars.line_speed;
	if (IS_MF(bp)) {
		u16 maxCfg = bnx2x_extract_max_cfg(bp,
						   bp->mf_config[BP_VN(bp)]);

		/* Calculate the current MAX line speed limit for the MF
		 * devices
		 */
		if (IS_MF_SI(bp))
			line_speed = (line_speed * maxCfg) / 100;
		else { /* SD mode */
			u16 vn_max_rate = maxCfg * 100;

			if (vn_max_rate < line_speed)
				line_speed = vn_max_rate;
		}
	}

	return line_speed;
}

/**
 * bnx2x_fill_report_data - fill link report data to report
 *
 * @bp:		driver handle
 * @data:	link state to update
 *
 * Uses non-atomic bit operations because it is called under the mutex.
 */
static inline void bnx2x_fill_report_data(struct bnx2x *bp,
					  struct bnx2x_link_report_data *data)
{
	u16 line_speed = bnx2x_get_mf_speed(bp);

	memset(data, 0, sizeof(*data));

	/* Fill the report data: effective line speed */
	data->line_speed = line_speed;

	/* Link is down */
	if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
		__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
			  &data->link_report_flags);

	/* Full DUPLEX */
	if (bp->link_vars.duplex == DUPLEX_FULL)
		__set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);

	/* Rx Flow Control is ON */
	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
		__set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);

	/* Tx Flow Control is ON */
	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
		__set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
}

/**
 * bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Calls the __bnx2x_link_report() under the same locking scheme
 * as a link/PHY state managing code to ensure a consistent link
 * reporting.
 */
void bnx2x_link_report(struct bnx2x *bp)
{
	bnx2x_acquire_phy_lock(bp);
	__bnx2x_link_report(bp);
	bnx2x_release_phy_lock(bp);
}

/**
 * __bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Non-atomic implementation.
 * Should be called under the phy_lock.
 */
void __bnx2x_link_report(struct bnx2x *bp)
{
	struct bnx2x_link_report_data cur_data;

	/* reread mf_cfg */
	if (!CHIP_IS_E1(bp))
		bnx2x_read_mf_cfg(bp);

	/* Read the current link report info */
	bnx2x_fill_report_data(bp, &cur_data);

	/* Don't report link down or exactly the same link status twice */
	if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
	    (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &bp->last_reported_link.link_report_flags) &&
	     test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &cur_data.link_report_flags)))
		return;

	bp->link_cnt++;

	/* We are going to report new link parameters now -
	 * remember the current data for the next time.
	 */
	memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));

	if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		     &cur_data.link_report_flags)) {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
		return;
	} else {
		const char *duplex;
		const char *flow;

		netif_carrier_on(bp->dev);

		if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
				       &cur_data.link_report_flags))
			duplex = "full";
		else
			duplex = "half";

		/* Handle the FC at the end so that only these flags would be
		 * possibly set. This way we may easily check if there is no FC
		 * enabled.
		 */
		if (cur_data.link_report_flags) {
			if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
				     &cur_data.link_report_flags)) {
				if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
					     &cur_data.link_report_flags))
					flow = "ON - receive & transmit";
				else
					flow = "ON - receive";
			} else {
				flow = "ON - transmit";
			}
		} else {
			flow = "none";
		}
		netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
			    cur_data.line_speed, duplex, flow);
	}
}

void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u16 ring_prod;
	int i, j;

	/* Allocate TPA resources */
	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		DP(NETIF_MSG_IFUP,
		   "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);

		if (!fp->disable_tpa) {
			/* Fill the per-aggregation pool */
			for (i = 0; i < MAX_AGG_QS(bp); i++) {
				struct bnx2x_agg_info *tpa_info =
					&fp->tpa_info[i];
				struct sw_rx_bd *first_buf =
					&tpa_info->first_buf;

				first_buf->data = kmalloc(fp->rx_buf_size + NET_SKB_PAD,
							  GFP_ATOMIC);
				if (!first_buf->data) {
					BNX2X_ERR("Failed to allocate TPA "
						  "skb pool for queue[%d] - "
						  "disabling TPA on this "
						  "queue!\n", j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				dma_unmap_addr_set(first_buf, mapping, 0);
				tpa_info->tpa_state = BNX2X_TPA_STOP;
			}

			/* "next page" elements initialization */
			bnx2x_set_next_page_sgl(fp);

			/* set SGEs bit mask */
			bnx2x_init_sge_ring_bit_mask(fp);

			/* Allocate SGEs and initialize the ring elements */
			for (i = 0, ring_prod = 0;
			     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

				if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
					BNX2X_ERR("was only able to allocate "
						  "%d rx sges\n", i);
					BNX2X_ERR("disabling TPA for "
						  "queue[%d]\n", j);
					/* Cleanup already allocated elements */
					bnx2x_free_rx_sge_range(bp, fp,
								ring_prod);
					bnx2x_free_tpa_pool(bp, fp,
							    MAX_AGG_QS(bp));
					fp->disable_tpa = 1;
					ring_prod = 0;
					break;
				}
				ring_prod = NEXT_SGE_IDX(ring_prod);
			}

			fp->rx_sge_prod = ring_prod;
		}
	}

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;

		/* Activate BD ring */
		/*
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);

		if (j != 0)
			continue;

		if (CHIP_IS_E1(bp)) {
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
			       U64_LO(fp->rx_comp_mapping));
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
			       U64_HI(fp->rx_comp_mapping));
		}
	}
}

static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;
	u8 cos;

	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
			unsigned pkts_compl = 0, bytes_compl = 0;

			u16 sw_prod = txdata->tx_pkt_prod;
			u16 sw_cons = txdata->tx_pkt_cons;

			while (sw_cons != sw_prod) {
				bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
						  &pkts_compl, &bytes_compl);
				sw_cons++;
			}
			netdev_tx_reset_queue(
			    netdev_get_tx_queue(bp->dev, txdata->txq_index));
		}
	}
}

static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	int i;

	/* ring wasn't allocated */
	if (fp->rx_buf_ring == NULL)
		return;

	for (i = 0; i < NUM_RX_BD; i++) {
		struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
		u8 *data = rx_buf->data;

		if (data == NULL)
			continue;
		dma_unmap_single(&bp->pdev->dev,
				 dma_unmap_addr(rx_buf, mapping),
				 fp->rx_buf_size, DMA_FROM_DEVICE);

		rx_buf->data = NULL;
		kfree(data);
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int j;

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		bnx2x_free_rx_bds(fp);

		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
	}
}

void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}

void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
{
	/* load old values */
	u32 mf_cfg = bp->mf_config[BP_VN(bp)];

	if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
		/* leave all but MAX value */
		mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;

		/* set new MAX value */
		mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
				& FUNC_MF_CFG_MAX_BW_MASK;

		bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
	}
}

/**
 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
 *
 * @bp:		driver handle
 * @nvecs:	number of vectors to be released
 */
static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
{
	int i, offset = 0;

	if (nvecs == offset)
		return;
	free_irq(bp->msix_table[offset].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[offset].vector);
	offset++;
#ifdef BCM_CNIC
	if (nvecs == offset)
		return;
	offset++;
#endif

	for_each_eth_queue(bp, i) {
		if (nvecs == offset)
			return;
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d "
		   "irq\n", i, bp->msix_table[offset].vector);

		free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
	}
}

void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG)
		bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
				     CNIC_PRESENT + 1);
	else if (bp->flags & USING_MSI_FLAG)
		free_irq(bp->pdev->irq, bp->dev);
	else
		free_irq(bp->pdev->irq, bp->dev);
}

int bnx2x_enable_msix(struct bnx2x *bp)
{
	int msix_vec = 0, i, rc, req_cnt;

	bp->msix_table[msix_vec].entry = msix_vec;
	DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
	   bp->msix_table[0].entry);
	msix_vec++;

#ifdef BCM_CNIC
	bp->msix_table[msix_vec].entry = msix_vec;
	DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
	   bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
	msix_vec++;
#endif
	/* We need separate vectors for ETH queues only (not FCoE) */
	for_each_eth_queue(bp, i) {
		bp->msix_table[msix_vec].entry = msix_vec;
		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
		   "(fastpath #%u)\n", msix_vec, msix_vec, i);
		msix_vec++;
	}

	req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_PRESENT + 1;

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);

	/*
	 * reconfigure number of tx/rx queues according to available
	 * MSI-X vectors
	 */
	if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
		/* how many fewer vectors will we have? */
		int diff = req_cnt - rc;

		DP(NETIF_MSG_IFUP,
		   "Trying to use less MSI-X vectors: %d\n", rc);

		rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);

		if (rc) {
			DP(NETIF_MSG_IFUP,
			   "MSI-X is not attainable rc %d\n", rc);
			return rc;
		}
		/*
		 * decrease number of queues by number of unallocated entries
		 */
		bp->num_queues -= diff;

		DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
				  bp->num_queues);
	} else if (rc) {
		/* fall to INTx if not enough memory */
		if (rc == -ENOMEM)
			bp->flags |= DISABLE_MSI_FLAG;
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
		return rc;
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;
}

static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 0;

	rc = request_irq(bp->msix_table[offset++].vector,
			 bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}

#ifdef BCM_CNIC
	offset++;
#endif
	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
			 bp->dev->name, i);

		rc = request_irq(bp->msix_table[offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
				  bp->msix_table[offset].vector, rc);
			bnx2x_free_msix_irqs(bp, offset);
			return -EBUSY;
		}

		offset++;
	}

	i = BNX2X_NUM_ETH_QUEUES(bp);
	offset = 1 + CNIC_PRESENT;
	netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
		    " ... fp[%d] %d\n",
		    bp->msix_table[0].vector,
		    0, bp->msix_table[offset].vector,
		    i - 1, bp->msix_table[offset + i - 1].vector);

	return 0;
}

int bnx2x_enable_msi(struct bnx2x *bp)
{
	int rc;

	rc = pci_enable_msi(bp->pdev);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSI_FLAG;

	return 0;
}

static int bnx2x_req_irq(struct bnx2x *bp)
{
	unsigned long flags;
	int rc;

	if (bp->flags & USING_MSI_FLAG)
		flags = 0;
	else
		flags = IRQF_SHARED;

	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
			 bp->dev->name, bp->dev);
	return rc;
}

static inline int bnx2x_setup_irqs(struct bnx2x *bp)
{
	int rc = 0;
	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc)
			return rc;
	} else {
		bnx2x_ack_int(bp);
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
			return rc;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			netdev_info(bp->dev, "using MSI IRQ %d\n",
				    bp->pdev->irq);
		}
	}

	return 0;
}

static inline void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));
}

static inline void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
}

void bnx2x_netif_start(struct bnx2x *bp)
{
	if (netif_running(bp->dev)) {
		bnx2x_napi_enable(bp);
		bnx2x_int_enable(bp);
		if (bp->state == BNX2X_STATE_OPEN)
			netif_tx_wake_all_queues(bp->dev);
	}
}

void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
}

u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BCM_CNIC
	if (!NO_FCOE(bp)) {
		struct ethhdr *hdr = (struct ethhdr *)skb->data;
		u16 ether_type = ntohs(hdr->h_proto);

		/* Skip VLAN tag if present */
		if (ether_type == ETH_P_8021Q) {
			struct vlan_ethhdr *vhdr =
				(struct vlan_ethhdr *)skb->data;

			ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
		}

		/* If ethertype is FCoE or FIP - use FCoE ring */
		if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
			return bnx2x_fcoe_tx(bp, txq_index);
	}
#endif
	/* select a non-FCoE queue */
	return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
}

void bnx2x_set_num_queues(struct bnx2x *bp)
{
	switch (bp->multi_mode) {
	case ETH_RSS_MODE_DISABLED:
		bp->num_queues = 1;
		break;
	case ETH_RSS_MODE_REGULAR:
		bp->num_queues = bnx2x_calc_num_queues(bp);
		break;

	default:
		bp->num_queues = 1;
		break;
	}

#ifdef BCM_CNIC
	/* override in ISCSI SD mode */
	if (IS_MF_ISCSI_SD(bp))
		bp->num_queues = 1;
#endif
	/* Add special queues */
	bp->num_queues += NON_ETH_CONTEXT_USE;
}

/**
 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
 *
 * @bp:		Driver handle
 *
 * We currently support at most 16 Tx queues for each CoS, thus we will
 * allocate a multiple of 16 for ETH L2 rings according to the value of the
 * bp->max_cos.
 *
 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
 * index after all ETH L2 indices.
 *
 * If the actual number of Tx queues (for each CoS) is less than 16 then there
 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
 * 16..31,...) with indices that are not coupled with any real Tx queue.
 *
 * The proper configuration of skb->queue_mapping is handled by
 * bnx2x_select_queue() and __skb_tx_hash().
 *
 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
 */
static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
{
	int rc, tx, rx;

	tx = MAX_TXQS_PER_COS * bp->max_cos;
	rx = BNX2X_NUM_ETH_QUEUES(bp);

/* account for fcoe queue */
#ifdef BCM_CNIC
	if (!NO_FCOE(bp)) {
		rx += FCOE_PRESENT;
		tx += FCOE_PRESENT;
	}
#endif

	rc = netif_set_real_num_tx_queues(bp->dev, tx);
	if (rc) {
		BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
		return rc;
	}
	rc = netif_set_real_num_rx_queues(bp->dev, rx);
	if (rc) {
		BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
		return rc;
	}

	DP(NETIF_MSG_DRV, "Setting real num queues to (tx, rx) (%d, %d)\n",
			  tx, rx);

	return rc;
}

static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u32 mtu;

		/* Always use a mini-jumbo MTU for the FCoE L2 ring */
		if (IS_FCOE_IDX(i))
			/*
			 * Although there are no IP frames expected to arrive to
			 * this ring we still want to add an
			 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
			 * overrun attack.
			 */
			mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
		else
			mtu = bp->dev->mtu;
		fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
				  IP_HEADER_ALIGNMENT_PADDING +
				  ETH_OVREHEAD +
				  mtu +
				  BNX2X_FW_RX_ALIGN_END;
		/* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
	}
}

static inline int bnx2x_init_rss_pf(struct bnx2x *bp)
{
	int i;
	u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
	u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);

	/*
	 * Prepare the initial contents of the indirection table if RSS is
	 * enabled
	 */
	if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
		for (i = 0; i < sizeof(ind_table); i++)
			ind_table[i] =
				ethtool_rxfh_indir_default(i, num_eth_queues);
	}

	/*
	 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
	 * per-port, so if explicit configuration is needed, do it only
	 * for a PMF.
	 *
	 * For 57712 and newer on the other hand it's a per-function
	 * configuration.
	 */
	return bnx2x_config_rss_pf(bp, ind_table,
				   bp->port.pmf || !CHIP_IS_E1x(bp));
}

int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash)
{
	struct bnx2x_config_rss_params params = {0};
	int i;

	/* Although RSS is meaningless when there is a single HW queue we
	 * still need it enabled in order to have HW Rx hash generated.
	 *
	 * if (!is_eth_multi(bp))
	 *	bp->multi_mode = ETH_RSS_MODE_DISABLED;
	 */

	params.rss_obj = &bp->rss_conf_obj;

	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);

	/* RSS mode */
	switch (bp->multi_mode) {
	case ETH_RSS_MODE_DISABLED:
		__set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
		break;
	case ETH_RSS_MODE_REGULAR:
		__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
		break;
	case ETH_RSS_MODE_VLAN_PRI:
		__set_bit(BNX2X_RSS_MODE_VLAN_PRI, &params.rss_flags);
		break;
	case ETH_RSS_MODE_E1HOV_PRI:
		__set_bit(BNX2X_RSS_MODE_E1HOV_PRI, &params.rss_flags);
		break;
	case ETH_RSS_MODE_IP_DSCP:
		__set_bit(BNX2X_RSS_MODE_IP_DSCP, &params.rss_flags);
		break;
	default:
		BNX2X_ERR("Unknown multi_mode: %d\n", bp->multi_mode);
		return -EINVAL;
	}

	/* If RSS is enabled */
	if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
		/* RSS configuration */
		__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
		__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
		__set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
		__set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);

		/* Hash bits */
		params.rss_result_mask = MULTI_MASK;

		memcpy(params.ind_table, ind_table, sizeof(params.ind_table));

		if (config_hash) {
			/* RSS keys */
			for (i = 0; i < sizeof(params.rss_key) / 4; i++)
				params.rss_key[i] = random32();

			__set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
		}
	}

	return bnx2x_config_rss(bp, &params);
}

static inline int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	struct bnx2x_func_state_params func_params = {0};

	/* Prepare parameters for function state transitions */
	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_HW_INIT;

	func_params.params.hw_init.load_phase = load_code;

	return bnx2x_func_state_change(bp, &func_params);
}

/*
 * Cleans the objects that have internal lists without sending
 * ramrods. Should be run when interrupts are disabled.
 */
static void bnx2x_squeeze_objects(struct bnx2x *bp)
{
	int rc;
	unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
	struct bnx2x_mcast_ramrod_params rparam = {0};
	struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;

	/***************** Cleanup MACs' object first *************************/

	/* Wait for completion of requested */
	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
	/* Perform a dry cleanup */
	__set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);

	/* Clean ETH primary MAC */
	__set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
	rc = mac_obj->delete_all(bp, &bp->fp->mac_obj, &vlan_mac_flags,
				 &ramrod_flags);
	if (rc != 0)
		BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);

	/* Cleanup UC list */
	vlan_mac_flags = 0;
	__set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
	rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
				 &ramrod_flags);
	if (rc != 0)
		BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);

	/***************** Now clean mcast object *****************************/
	rparam.mcast_obj = &bp->mcast_obj;
	__set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);

	/* Add a DEL command... */
	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
	if (rc < 0)
		BNX2X_ERR("Failed to add a new DEL command to a multi-cast "
			  "object: %d\n", rc);

	/* ...and wait until all pending commands are cleared */
	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
	while (rc != 0) {
		if (rc < 0) {
			BNX2X_ERR("Failed to clean multi-cast object: %d\n",
				  rc);
			return;
		}

		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
	}
}

#ifndef BNX2X_STOP_ON_ERROR
#define LOAD_ERROR_EXIT(bp, label) \
	do { \
		(bp)->state = BNX2X_STATE_ERROR; \
		goto label; \
	} while (0)
#else
#define LOAD_ERROR_EXIT(bp, label) \
	do { \
		(bp)->state = BNX2X_STATE_ERROR; \
		(bp)->panic = 1; \
		return -EBUSY; \
	} while (0)
#endif

/* must be called with rtnl_lock */
int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	int port = BP_PORT(bp);
	u32 load_code;
	int i, rc;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	/* Set the initial link reported state to link down */
	bnx2x_acquire_phy_lock(bp);
	memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
	__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		  &bp->last_reported_link.link_report_flags);
	bnx2x_release_phy_lock(bp);

	/* must be called before memory allocation and HW init */
	bnx2x_ilt_set_info(bp);

	/*
	 * Zero fastpath structures preserving invariants like napi, which are
	 * allocated only once, fp index, max_cos, bp pointer.
	 * Also set fp->disable_tpa.
	 */
	for_each_queue(bp, i)
		bnx2x_bz_fp(bp, i);

	/* Set the receive queues buffer size */
	bnx2x_set_rx_buf_size(bp);

	if (bnx2x_alloc_mem(bp))
		return -ENOMEM;

	/* As long as bnx2x_alloc_mem() may possibly update
	 * bp->num_queues, bnx2x_set_real_num_queues() should always
	 * come after it.
	 */
	rc = bnx2x_set_real_num_queues(bp);
	if (rc) {
		BNX2X_ERR("Unable to set real_num_queues\n");
		LOAD_ERROR_EXIT(bp, load_error0);
	}

	/* configure multi cos mappings in kernel.
	 * this configuration may be overridden by a multi class queue discipline
	 * or by a dcbx negotiation result.
	 */
	bnx2x_setup_tc(bp->dev, bp->max_cos);

	bnx2x_napi_enable(bp);

	/* set pf load just before approaching the MCP */
	bnx2x_set_pf_load(bp);

	/* Send LOAD_REQUEST command to MCP
	 * Returns the type of LOAD command:
	 * if it is the first port to be initialized
	 * common blocks should be initialized, otherwise - not
	 */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			LOAD_ERROR_EXIT(bp, load_error1);
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			LOAD_ERROR_EXIT(bp, load_error1);
		}
		if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
		    load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
			/* build FW version dword */
			u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
					(BCM_5710_FW_MINOR_VERSION << 8) +
					(BCM_5710_FW_REVISION_VERSION << 16) +
					(BCM_5710_FW_ENGINEERING_VERSION << 24);

			/* read loaded FW from chip */
			u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);

			DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x",
			   loaded_fw, my_fw);

			/* abort nic load if version mismatch */
			if (my_fw != loaded_fw) {
				BNX2X_ERR("bnx2x with FW %x already loaded, "
					  "which mismatches my %x FW. aborting",
					  loaded_fw, my_fw);
				rc = -EBUSY;
				LOAD_ERROR_EXIT(bp, load_error2);
			}
		}

	} else {
		int path = BP_PATH(bp);

		DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
		   path, load_count[path][0], load_count[path][1],
		   load_count[path][2]);
		load_count[path][0]++;
		load_count[path][1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
		   path, load_count[path][0], load_count[path][1],
		   load_count[path][2]);
		if (load_count[path][0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[path][1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
		bp->port.pmf = 1;
		/*
		 * We need the barrier to ensure the ordering between the
		 * writing to bp->port.pmf here and reading it from the
		 * bnx2x_periodic_task().
		 */
		smp_mb();
		queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
	} else
		bp->port.pmf = 0;

	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* Init Function state controlling object */
	bnx2x__init_func_obj(bp);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
		LOAD_ERROR_EXIT(bp, load_error2);
	}

	/* Connect to IRQs */
	rc = bnx2x_setup_irqs(bp);
	if (rc) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
		LOAD_ERROR_EXIT(bp, load_error2);
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	/* Init per-function objects */
	bnx2x_init_bp_objs(bp);

	if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
	    (bp->common.shmem2_base)) {
		if (SHMEM2_HAS(bp, dcc_support))
			SHMEM2_WR(bp, dcc_support,
				  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
				   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
	}

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
	rc = bnx2x_func_start(bp);
	if (rc) {
		BNX2X_ERR("Function start failed!\n");
		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
		LOAD_ERROR_EXIT(bp, load_error3);
	}

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			LOAD_ERROR_EXIT(bp, load_error3);
		}
	}

	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
		LOAD_ERROR_EXIT(bp, load_error3);
	}

#ifdef BCM_CNIC
	/* Enable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
#endif

	for_each_nondefault_queue(bp, i) {
		rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
		if (rc)
			LOAD_ERROR_EXIT(bp, load_error4);
	}

	rc = bnx2x_init_rss_pf(bp);
	if (rc)
		LOAD_ERROR_EXIT(bp, load_error4);

	/* Now when Clients are configured we are ready to work */
	bp->state = BNX2X_STATE_OPEN;

	/* Configure a ucast MAC */
	rc = bnx2x_set_eth_mac(bp, true);
	if (rc)
		LOAD_ERROR_EXIT(bp, load_error4);

	if (bp->pending_max) {
		bnx2x_update_max_mf_config(bp, bp->pending_max);
		bp->pending_max = 0;
	}

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp, load_mode);

	/* Start fast path */

	/* Initialize Rx filter. */
	netif_addr_lock_bh(bp->dev);
	bnx2x_set_rx_mode(bp->dev);
	netif_addr_unlock_bh(bp->dev);

	/* Start the Tx */
	switch (load_mode) {
	case LOAD_NORMAL:
		/* Tx queue should be only reenabled */
		netif_tx_wake_all_queues(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		smp_mb__after_clear_bit();
		break;

	case LOAD_DIAG:
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (bp->port.pmf)
		bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 0);
	else
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

#ifdef BCM_CNIC
	/* re-read iscsi info */
	bnx2x_get_iscsi_info(bp);
	bnx2x_setup_cnic_irq_info(bp);
	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
#endif

	/* Wait for all pending SP commands to complete */
	if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
		BNX2X_ERR("Timeout waiting for SP elements to complete\n");
		bnx2x_nic_unload(bp, UNLOAD_CLOSE);
		return -EBUSY;
	}

	bnx2x_dcbx_init(bp);
	return 0;

#ifndef BNX2X_STOP_ON_ERROR
load_error4:
#ifdef BCM_CNIC
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
#endif
load_error3:
	bnx2x_int_disable_sync(bp, 1);

	/* Clean queueable objects */
	bnx2x_squeeze_objects(bp);

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);

	/* Release IRQs */
	bnx2x_free_irq(bp);
load_error2:
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
	}

	bp->port.pmf = 0;
load_error1:
	bnx2x_napi_disable(bp);
	/* clear pf_load status, as it was already set */
	bnx2x_clear_pf_load(bp);
load_error0:
	bnx2x_free_mem(bp);

	return rc;
#endif /* ! BNX2X_STOP_ON_ERROR */
}

/* must be called with rtnl_lock */
int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int i;
	bool global = false;

	if ((bp->state == BNX2X_STATE_CLOSED) ||
	    (bp->state == BNX2X_STATE_ERROR)) {
		/* We can get here if the driver has been unloaded
		 * during parity error recovery and is either waiting for a
		 * leader to complete or for other functions to unload and
		 * then ifdown has been issued. In this case we want to
		 * unload and let other functions complete a recovery
		 * process.
		 */
		bp->recovery_state = BNX2X_RECOVERY_DONE;
		bp->is_leader = 0;
		bnx2x_release_leader_lock(bp);
		smp_mb();

		DP(NETIF_MSG_HW, "Releasing a leadership...\n");

		return -EINVAL;
	}

	/*
	 * It's important to set the bp->state to the value different from
	 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
	 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
	 */
	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
	smp_mb();

	/* Stop Tx */
	bnx2x_tx_disable(bp);

#ifdef BCM_CNIC
	bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
#endif

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	del_timer_sync(&bp->timer);

	/* Set ALWAYS_ALIVE bit in shmem */
	bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;

	bnx2x_drv_pulse(bp);

	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Cleanup the chip if needed */
	if (unload_mode != UNLOAD_RECOVERY)
		bnx2x_chip_cleanup(bp, unload_mode);
	else {
		/* Send the UNLOAD_REQUEST to the MCP */
		bnx2x_send_unload_req(bp, unload_mode);

		/*
		 * Prevent transactions to host from the functions on the
		 * engine that doesn't reset global blocks in case of global
		 * attention once global blocks are reset and gates are opened
		 * (the engine whose leader will perform the recovery
		 * last).
		 */
		if (!CHIP_IS_E1x(bp))
			bnx2x_pf_disable(bp);

		/* Disable HW interrupts, NAPI */
		bnx2x_netif_stop(bp, 1);

		/* Release IRQs */
		bnx2x_free_irq(bp);

		/* Report UNLOAD_DONE to MCP */
		bnx2x_send_unload_done(bp);
	}

	/*
	 * At this stage no more interrupts will arrive so we may safely clean
	 * the queueable objects here in case they failed to get cleaned so far.
	 */
	bnx2x_squeeze_objects(bp);

	/* There should be no more pending SP commands at this stage */
	bp->sp_state = 0;

	bp->port.pmf = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);

	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	/* Check if there are pending parity attentions. If there are - set
	 * RECOVERY_IN_PROGRESS.
	 */
	if (bnx2x_chk_parity_attn(bp, &global, false)) {
		bnx2x_set_reset_in_progress(bp);

		/* Set RESET_IS_GLOBAL if needed */
		if (global)
			bnx2x_set_reset_global(bp);
	}

	/* The last driver must disable a "close the gate" if there is no
	 * parity attention or "process kill" pending.
	 */
	if (!bnx2x_clear_pf_load(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
		bnx2x_disable_close_the_gate(bp);

	return 0;
}

int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	/* If there is no power capability, silently succeed */
	if (!bp->pm_cap) {
		DP(NETIF_MSG_HW, "No power capability. Breaking.\n");
		return 0;
	}

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		/* If there are other clients above don't
		   shut down the power */
		if (atomic_read(&bp->pdev->enable_cnt) != 1)
			return 0;
		/* Don't shut down the power for emulation and FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			return 0;

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}

2200 int bnx2x_poll(struct napi_struct *napi, int budget)
2204 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
2206 struct bnx2x *bp = fp->bp;
2209 #ifdef BNX2X_STOP_ON_ERROR
2210 if (unlikely(bp->panic)) {
2211 napi_complete(napi);
2216 for_each_cos_in_tx_queue(fp, cos)
2217 if (bnx2x_tx_queue_has_work(&fp->txdata[cos]))
2218 bnx2x_tx_int(bp, &fp->txdata[cos]);
2221 if (bnx2x_has_rx_work(fp)) {
2222 work_done += bnx2x_rx_int(fp, budget - work_done);
2224 /* must not complete if we consumed full budget */
2225 if (work_done >= budget)
2229 /* Fall out from the NAPI loop if needed */
2230 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2232 /* No need to update SB for FCoE L2 ring as long as
2233 * it's connected to the default SB and the SB
2234 * has been updated when NAPI was scheduled.
2236 if (IS_FCOE_FP(fp)) {
2237 napi_complete(napi);
2242 bnx2x_update_fpsb_idx(fp);
2243 /* bnx2x_has_rx_work() reads the status block,
2244 * thus we need to ensure that status block indices
2245 * have been actually read (bnx2x_update_fpsb_idx)
2246 * prior to this check (bnx2x_has_rx_work) so that
2247 * we won't write the "newer" value of the status block
2248 * to IGU (if there was a DMA right after
2249 * bnx2x_has_rx_work and if there is no rmb, the memory
2250 * reading (bnx2x_update_fpsb_idx) may be postponed
2251 * to right before bnx2x_ack_sb). In this case there
2252 * will never be another interrupt until there is
2253 * another update of the status block, while there
2254 * is still unhandled work.
2258 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2259 napi_complete(napi);
2260 /* Re-enable interrupts */
2262 "Update index to %d\n", fp->fp_hc_idx);
2263 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
2264 le16_to_cpu(fp->fp_hc_idx),
/* we split the first BD into headers and data BDs
 * to ease the pain of our fellow microcode engineers
 * we use one mapping for both BDs
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fp_txdata *txdata,
				   struct sw_tx_bd *tx_buf,
				   struct eth_tx_start_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_start_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;

	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);

	/* this marks the BD as one that has no individual mapping */
	tx_buf->flags |= BNX2X_TSO_SPLIT_BD;

	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd */
	*tx_bd = (struct eth_tx_start_bd *)d_tx_bd;

	return bd_prod;
}

static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	if (fix > 0)
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
}

static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
	u32 rc;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		rc = XMIT_PLAIN;

	else {
		if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
			rc = XMIT_CSUM_V6;
			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;

		} else {
			rc = XMIT_CSUM_V4;
			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		}
	}

	if (skb_is_gso_v6(skb))
		rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
	else if (skb_is_gso(skb))
		rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;

	return rc;
}

2365 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2366 /* check if packet requires linearization (packet is too fragmented)
2367 no need to check fragmentation if page size > 8K (there will be no
2368 violation to FW restrictions) */
2369 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
2374 int first_bd_sz = 0;
2376 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
2377 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
2379 if (xmit_type & XMIT_GSO) {
2380 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
2381 /* Check if LSO packet needs to be copied:
2382 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
2383 int wnd_size = MAX_FETCH_BD - 3;
2384 /* Number of windows to check */
2385 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
2390 /* Headers length */
2391 hlen = (int)(skb_transport_header(skb) - skb->data) +
2394 /* Amount of data (w/o headers) on linear part of SKB*/
2395 first_bd_sz = skb_headlen(skb) - hlen;
2397 wnd_sum = first_bd_sz;
2399 /* Calculate the first sum - it's special */
2400 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
2402 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
2404 /* If there was data on linear skb data - check it */
2405 if (first_bd_sz > 0) {
2406 if (unlikely(wnd_sum < lso_mss)) {
2411 wnd_sum -= first_bd_sz;
2414 /* Others are easier: run through the frag list and
2415 check all windows */
2416 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
2417 wnd_sum +=
2418 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
2420 if (unlikely(wnd_sum < lso_mss)) {
2421 to_copy = 1;
2422 break;
2423 }
2424 wnd_sum -=
2425 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
2426 }
2427 } else {
2428 /* in the non-LSO case, a too fragmented packet should
2429 always be linearized */
2430 to_copy = 1;
2431 }
2432 }
2434 exit_lbl:
2435 if (unlikely(to_copy))
2436 DP(NETIF_MSG_TX_QUEUED,
2437 "Linearization IS REQUIRED for %s packet. "
2438 "num_frags %d hlen %d first_bd_sz %d\n",
2439 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
2440 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
2442 return to_copy;
2443 }
2444 #endif
2446 static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
2447 u32 xmit_type)
2448 {
2449 *parsing_data |= (skb_shinfo(skb)->gso_size <<
2450 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
2451 ETH_TX_PARSE_BD_E2_LSO_MSS;
2452 if ((xmit_type & XMIT_GSO_V6) &&
2453 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
2454 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
2455 }
2458 * bnx2x_set_pbd_gso - update PBD in GSO case.
2459 *
2460 * @skb: packet skb
2461 * @pbd: parse BD
2462 * @xmit_type: xmit flags
2463 */
2464 static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
2465 struct eth_tx_parse_bd_e1x *pbd,
2466 u32 xmit_type)
2467 {
2468 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2469 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
2470 pbd->tcp_flags = pbd_tcp_flags(skb);
2472 if (xmit_type & XMIT_GSO_V4) {
2473 pbd->ip_id = swab16(ip_hdr(skb)->id);
2474 pbd->tcp_pseudo_csum =
2475 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
2476 ip_hdr(skb)->daddr,
2477 0, IPPROTO_TCP, 0));
2479 } else
2480 pbd->tcp_pseudo_csum =
2481 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2482 &ipv6_hdr(skb)->daddr,
2483 0, IPPROTO_TCP, 0));
2485 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
2486 }
2489 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
2490 *
2491 * @bp: driver handle
2492 * @skb: packet skb
2493 * @parsing_data: data to be updated
2494 * @xmit_type: xmit flags
2495 */
2498 static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
2499 u32 *parsing_data, u32 xmit_type)
2500 {
2501 *parsing_data |=
2502 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
2503 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
2504 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
2506 if (xmit_type & XMIT_CSUM_TCP) {
2507 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
2508 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
2509 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
2511 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
2512 }
2513 /* We support checksum offload for TCP and UDP only.
2514 * No need to pass the UDP header length - it's a constant.
2515 */
2516 return skb_transport_header(skb) +
2517 sizeof(struct udphdr) - skb->data;
2518 }
2520 static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2521 struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
2522 {
2523 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2525 if (xmit_type & XMIT_CSUM_V4)
2526 tx_start_bd->bd_flags.as_bitfield |=
2527 ETH_TX_BD_FLAGS_IP_CSUM;
2528 else
2529 tx_start_bd->bd_flags.as_bitfield |=
2530 ETH_TX_BD_FLAGS_IPV6;
2532 if (!(xmit_type & XMIT_CSUM_TCP))
2533 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
2534 }
2537 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
2538 *
2539 * @bp: driver handle
2540 * @skb: packet skb
2541 * @pbd: parse BD to be updated
2542 * @xmit_type: xmit flags
2543 */
2544 static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2545 struct eth_tx_parse_bd_e1x *pbd,
2546 u32 xmit_type)
2547 {
2548 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
2550 /* for now NS flag is not used in Linux */
2551 pbd->global_data =
2552 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
2553 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
2555 pbd->ip_hlen_w = (skb_transport_header(skb) -
2556 skb_network_header(skb)) >> 1;
2558 hlen += pbd->ip_hlen_w;
2560 /* We support checksum offload for TCP and UDP only */
2561 if (xmit_type & XMIT_CSUM_TCP)
2562 hlen += tcp_hdrlen(skb) / 2;
2563 else
2564 hlen += sizeof(struct udphdr) / 2;
2566 pbd->total_hlen_w = cpu_to_le16(hlen);
2567 hlen = hlen*2;
2569 if (xmit_type & XMIT_CSUM_TCP) {
2570 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
2572 } else {
2573 s8 fix = SKB_CS_OFF(skb); /* signed! */
2575 DP(NETIF_MSG_TX_QUEUED,
2576 "hlen %d fix %d csum before fix %x\n",
2577 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
2579 /* HW bug: fixup the CSUM */
2580 pbd->tcp_pseudo_csum =
2581 bnx2x_csum_fix(skb_transport_header(skb),
2582 SKB_CS(skb), fix);
2584 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
2585 pbd->tcp_pseudo_csum);
2586 }
2588 return hlen;
2589 }
2591 /* called with netif_tx_lock
2592 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
2593 * netif_wake_queue()
2594 */
2595 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2596 {
2597 struct bnx2x *bp = netdev_priv(dev);
2599 struct bnx2x_fastpath *fp;
2600 struct netdev_queue *txq;
2601 struct bnx2x_fp_txdata *txdata;
2602 struct sw_tx_bd *tx_buf;
2603 struct eth_tx_start_bd *tx_start_bd, *first_bd;
2604 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
2605 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
2606 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
2607 u32 pbd_e2_parsing_data = 0;
2608 u16 pkt_prod, bd_prod;
2609 int nbd, txq_index, fp_index, txdata_index;
2610 dma_addr_t mapping;
2611 u32 xmit_type = bnx2x_xmit_type(bp, skb);
2612 int i;
2613 u8 hlen = 0;
2614 __le16 pkt_size = 0;
2615 struct ethhdr *eth;
2616 u8 mac_type = UNICAST_ADDRESS;
2618 #ifdef BNX2X_STOP_ON_ERROR
2619 if (unlikely(bp->panic))
2620 return NETDEV_TX_BUSY;
2621 #endif
2623 txq_index = skb_get_queue_mapping(skb);
2624 txq = netdev_get_tx_queue(dev, txq_index);
2626 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT);
2628 /* decode the fastpath index and the cos index from the txq */
2629 fp_index = TXQ_TO_FP(txq_index);
2630 txdata_index = TXQ_TO_COS(txq_index);
2633 /*
2634 * Override the above for the FCoE queue:
2635 * - FCoE fp entry is right after the ETH entries.
2636 * - FCoE L2 queue uses bp->txdata[0] only.
2637 */
2638 if (unlikely(!NO_FCOE(bp) && (txq_index ==
2639 bnx2x_fcoe_tx(bp, txq_index)))) {
2640 fp_index = FCOE_IDX;
2641 txdata_index = 0;
2642 }
2645 /* enable this debug print to view the transmission queue being used
2646 DP(BNX2X_MSG_FP, "indices: txq %d, fp %d, txdata %d\n",
2647 txq_index, fp_index, txdata_index); */
2649 /* locate the fastpath and the txdata */
2650 fp = &bp->fp[fp_index];
2651 txdata = &fp->txdata[txdata_index];
2653 /* enable this debug print to view the transmission details
2654 DP(BNX2X_MSG_FP,"transmitting packet cid %d fp index %d txdata_index %d"
2655 " tx_data ptr %p fp pointer %p\n",
2656 txdata->cid, fp_index, txdata_index, txdata, fp); */
2658 if (unlikely(bnx2x_tx_avail(bp, txdata) <
2659 (skb_shinfo(skb)->nr_frags + 3))) {
2660 fp->eth_q_stats.driver_xoff++;
2661 netif_tx_stop_queue(txq);
2662 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2663 return NETDEV_TX_BUSY;
2666 DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x protocol %x "
2667 "protocol(%x,%x) gso type %x xmit_type %x\n",
2668 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
2669 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
2671 eth = (struct ethhdr *)skb->data;
2673 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
2674 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
2675 if (is_broadcast_ether_addr(eth->h_dest))
2676 mac_type = BROADCAST_ADDRESS;
2677 else
2678 mac_type = MULTICAST_ADDRESS;
2679 }
2681 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2682 /* First, check if we need to linearize the skb (due to FW
2683 restrictions). No need to check fragmentation if page size > 8K
2684 (there will be no violation to FW restrictions) */
2685 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
2686 /* Statistics of linearization */
2687 bp->lin_cnt++;
2688 if (skb_linearize(skb) != 0) {
2689 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
2690 "silently dropping this SKB\n");
2691 dev_kfree_skb_any(skb);
2692 return NETDEV_TX_OK;
2693 }
2694 }
2695 #endif
2696 /* Map skb linear data for DMA */
2697 mapping = dma_map_single(&bp->pdev->dev, skb->data,
2698 skb_headlen(skb), DMA_TO_DEVICE);
2699 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2700 DP(NETIF_MSG_TX_QUEUED, "SKB mapping failed - "
2701 "silently dropping this SKB\n");
2702 dev_kfree_skb_any(skb);
2703 return NETDEV_TX_OK;
2704 }
2705 /*
2706 Please read carefully. First we use one BD which we mark as start,
2707 then we have a parsing info BD (used for TSO or xsum),
2708 and only then we have the rest of the TSO BDs.
2709 (don't forget to mark the last one as last,
2710 and to unmap only AFTER you write to the BD ...)
2711 And above all, all PBD sizes are in words - NOT DWORDS!
2712 */
2714 /* get current pkt produced now - advance it just before sending packet
2715 * since mapping of pages may fail and cause packet to be dropped
2716 */
2717 pkt_prod = txdata->tx_pkt_prod;
2718 bd_prod = TX_BD(txdata->tx_bd_prod);
2720 /* get a tx_buf and first BD
2721 * tx_start_bd may be changed during SPLIT,
2722 * but first_bd will always stay first
2723 */
2724 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
2725 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
2726 first_bd = tx_start_bd;
2728 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
2729 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
2730 mac_type);
2733 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
2735 /* remember the first BD of the packet */
2736 tx_buf->first_bd = txdata->tx_bd_prod;
2737 tx_buf->skb = skb;
2738 tx_buf->flags = 0;
2740 DP(NETIF_MSG_TX_QUEUED,
2741 "sending pkt %u @%p next_idx %u bd %u @%p\n",
2742 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
2744 if (vlan_tx_tag_present(skb)) {
2745 tx_start_bd->vlan_or_ethertype =
2746 cpu_to_le16(vlan_tx_tag_get(skb));
2747 tx_start_bd->bd_flags.as_bitfield |=
2748 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
2749 } else
2750 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
2752 /* turn on parsing and get a BD */
2753 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2755 if (xmit_type & XMIT_CSUM)
2756 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
2758 if (!CHIP_IS_E1x(bp)) {
2759 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
2760 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
2761 /* Set PBD in checksum offload case */
2762 if (xmit_type & XMIT_CSUM)
2763 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
2764 &pbd_e2_parsing_data,
2765 xmit_type);
2767 /*
2768 * fill in the MAC addresses in the PBD - for local
2769 * switching
2770 */
2771 bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
2772 &pbd_e2->src_mac_addr_mid,
2773 &pbd_e2->src_mac_addr_lo,
2774 eth->h_source);
2775 bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
2776 &pbd_e2->dst_mac_addr_mid,
2777 &pbd_e2->dst_mac_addr_lo,
2778 eth->h_dest);
2780 } else {
2781 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
2782 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
2783 /* Set PBD in checksum offload case */
2784 if (xmit_type & XMIT_CSUM)
2785 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
2787 }
2789 /* Setup the data pointer of the first BD of the packet */
2790 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2791 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2792 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
2793 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
2794 pkt_size = tx_start_bd->nbytes;
2796 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
2797 " nbytes %d flags %x vlan %x\n",
2798 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
2799 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
2800 tx_start_bd->bd_flags.as_bitfield,
2801 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
2803 if (xmit_type & XMIT_GSO) {
2805 DP(NETIF_MSG_TX_QUEUED,
2806 "TSO packet len %d hlen %d total len %d tso size %d\n",
2807 skb->len, hlen, skb_headlen(skb),
2808 skb_shinfo(skb)->gso_size);
2810 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
2812 if (unlikely(skb_headlen(skb) > hlen))
2813 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
2814 &tx_start_bd, hlen,
2815 bd_prod, ++nbd);
2816 if (!CHIP_IS_E1x(bp))
2817 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
2818 xmit_type);
2819 else
2820 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
2821 }
2823 /* Set the PBD's parsing_data field if not zero
2824 * (for the chips newer than 57711).
2825 */
2826 if (pbd_e2_parsing_data)
2827 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
2829 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
2831 /* Handle fragmented skb */
2832 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2833 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2835 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
2836 skb_frag_size(frag), DMA_TO_DEVICE);
2837 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2838 unsigned int pkts_compl = 0, bytes_compl = 0;
2840 DP(NETIF_MSG_TX_QUEUED, "Unable to map page - "
2841 "dropping packet...\n");
2843 /* we need to unmap all buffers already mapped
2844 * for this SKB;
2845 * first_bd->nbd needs to be properly updated
2846 * before the call to bnx2x_free_tx_pkt
2847 */
2848 first_bd->nbd = cpu_to_le16(nbd);
2849 bnx2x_free_tx_pkt(bp, txdata,
2850 TX_BD(txdata->tx_pkt_prod),
2851 &pkts_compl, &bytes_compl);
2852 return NETDEV_TX_OK;
2853 }
2855 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2856 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
2857 if (total_pkt_bd == NULL)
2858 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
2860 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2861 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2862 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
2863 le16_add_cpu(&pkt_size, skb_frag_size(frag));
2864 nbd++;
2866 DP(NETIF_MSG_TX_QUEUED,
2867 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
2868 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
2869 le16_to_cpu(tx_data_bd->nbytes));
2870 }
2872 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
2874 /* update with actual num BDs */
2875 first_bd->nbd = cpu_to_le16(nbd);
2877 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2879 /* now send a tx doorbell, counting the next BD
2880 * if the packet contains or ends with it
2882 if (TX_BD_POFF(bd_prod) < nbd)
2883 nbd++;
2885 /* total_pkt_bytes should be set on the first data BD if
2886 * it's not an LSO packet and there is more than one
2887 * data BD. In this case pkt_size is limited by an MTU value.
2888 * However we prefer to set it for an LSO packet as well (even though
2889 * we don't have to) in order to save some CPU cycles in the non-LSO
2890 * case, where they matter much more.
2891 */
2892 if (total_pkt_bd != NULL)
2893 total_pkt_bd->total_pkt_bytes = pkt_size;
2895 if (pbd_e1x)
2896 DP(NETIF_MSG_TX_QUEUED,
2897 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
2898 " tcp_flags %x xsum %x seq %u hlen %u\n",
2899 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
2900 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
2901 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
2902 le16_to_cpu(pbd_e1x->total_hlen_w));
2903 if (pbd_e2)
2904 DP(NETIF_MSG_TX_QUEUED,
2905 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
2906 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
2907 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
2908 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
2909 pbd_e2->parsing_data);
2910 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
2912 netdev_tx_sent_queue(txq, skb->len);
2914 txdata->tx_pkt_prod++;
2915 /*
2916 * Make sure that the BD data is updated before updating the producer
2917 * since FW might read the BD right after the producer is updated.
2918 * This is only applicable for weak-ordered memory model archs such
2919 * as IA-64. The following barrier is also mandatory since the FW
2920 * assumes packets must have BDs.
2921 */
2923 wmb();
2924 txdata->tx_db.data.prod += nbd;
2927 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
2929 mmiowb();
2931 txdata->tx_bd_prod += nbd;
2933 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 3)) {
2934 netif_tx_stop_queue(txq);
2936 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
2937 * ordering of set_bit() in netif_tx_stop_queue() and read of
2938 * the tx consumer index */
2940 smp_mb();
2941 fp->eth_q_stats.driver_xoff++;
2942 if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3)
2943 netif_tx_wake_queue(txq);
2944 }
2947 return NETDEV_TX_OK;
2948 }
2951 * bnx2x_setup_tc - routine to configure net_device for multi tc
2952 *
2953 * @dev: net device to configure
2954 * @num_tc: number of traffic classes to enable
2955 *
2956 * callback connected to the ndo_setup_tc function pointer
2957 */
2958 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
2959 {
2960 int cos, prio, count, offset;
2961 struct bnx2x *bp = netdev_priv(dev);
2963 /* setup tc must be called under rtnl lock */
2964 ASSERT_RTNL();
2966 /* no traffic classes requested. aborting */
2967 if (!num_tc) {
2968 netdev_reset_tc(dev);
2969 return 0;
2970 }
2972 /* requested to support too many traffic classes */
2973 if (num_tc > bp->max_cos) {
2974 DP(NETIF_MSG_TX_ERR, "support for too many traffic classes"
2975 " requested: %d. max supported is %d\n",
2976 num_tc, bp->max_cos);
2977 return -EINVAL;
2978 }
2980 /* declare amount of supported traffic classes */
2981 if (netdev_set_num_tc(dev, num_tc)) {
2982 DP(NETIF_MSG_TX_ERR, "failed to declare %d traffic classes\n",
2983 num_tc);
2984 return -EINVAL;
2985 }
2987 /* configure priority to traffic class mapping */
2988 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
2989 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
2990 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n",
2991 prio, bp->prio_to_cos[prio]);
2992 }
2995 /* Use this configuration to differentiate tc0 from other COSes
2996 This can be used for ets or pfc, and save the effort of setting
2997 up a multi-class queueing discipline or negotiating DCBX with a switch
2998 netdev_set_prio_tc_map(dev, 0, 0);
2999 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
3000 for (prio = 1; prio < 16; prio++) {
3001 netdev_set_prio_tc_map(dev, prio, 1);
3002 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
3003 } */
3005 /* configure traffic class to transmission queue mapping */
3006 for (cos = 0; cos < bp->max_cos; cos++) {
3007 count = BNX2X_NUM_ETH_QUEUES(bp);
3008 offset = cos * MAX_TXQS_PER_COS;
3009 netdev_set_tc_queue(dev, cos, count, offset);
3010 DP(BNX2X_MSG_SP, "mapping tc %d to offset %d count %d\n",
3011 cos, offset, count);
3012 }
3014 return 0;
3015 }
3017 /* called with rtnl_lock */
3018 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
3019 {
3020 struct sockaddr *addr = p;
3021 struct bnx2x *bp = netdev_priv(dev);
3022 int rc = 0;
3024 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data))
3025 return -EINVAL;
3028 if (IS_MF_ISCSI_SD(bp) && !is_zero_ether_addr(addr->sa_data))
3029 return -EINVAL;
3032 if (netif_running(dev)) {
3033 rc = bnx2x_set_eth_mac(bp, false);
3034 if (rc)
3035 return rc;
3036 }
3038 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3040 if (netif_running(dev))
3041 rc = bnx2x_set_eth_mac(bp, true);
3043 return rc;
3044 }
3046 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
3047 {
3048 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
3049 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
3050 u8 cos;
3054 if (IS_FCOE_IDX(fp_index)) {
3055 memset(sb, 0, sizeof(union host_hc_status_block));
3056 fp->status_blk_mapping = 0;
3057 } else {
3061 if (!CHIP_IS_E1x(bp))
3062 BNX2X_PCI_FREE(sb->e2_sb,
3063 bnx2x_fp(bp, fp_index,
3064 status_blk_mapping),
3065 sizeof(struct host_hc_status_block_e2));
3066 else
3067 BNX2X_PCI_FREE(sb->e1x_sb,
3068 bnx2x_fp(bp, fp_index,
3069 status_blk_mapping),
3070 sizeof(struct host_hc_status_block_e1x));
3071 }
3073 /* Rx */
3075 if (!skip_rx_queue(bp, fp_index)) {
3076 bnx2x_free_rx_bds(fp);
3078 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3079 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
3080 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
3081 bnx2x_fp(bp, fp_index, rx_desc_mapping),
3082 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3084 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
3085 bnx2x_fp(bp, fp_index, rx_comp_mapping),
3086 sizeof(struct eth_fast_path_rx_cqe) *
3087 NUM_RCQ_BD);
3090 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
3091 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
3092 bnx2x_fp(bp, fp_index, rx_sge_mapping),
3093 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3094 }
3097 if (!skip_tx_queue(bp, fp_index)) {
3098 /* fastpath tx rings: tx_buf tx_desc */
3099 for_each_cos_in_tx_queue(fp, cos) {
3100 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
3103 "freeing tx memory of fp %d cos %d cid %d\n",
3104 fp_index, cos, txdata->cid);
3106 BNX2X_FREE(txdata->tx_buf_ring);
3107 BNX2X_PCI_FREE(txdata->tx_desc_ring,
3108 txdata->tx_desc_mapping,
3109 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3110 }
3111 }
3112 /* end of fastpath */
3113 }
3115 void bnx2x_free_fp_mem(struct bnx2x *bp)
3116 {
3117 int i;
3118 for_each_queue(bp, i)
3119 bnx2x_free_fp_mem_at(bp, i);
3120 }
3122 static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
3123 {
3124 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
3125 if (!CHIP_IS_E1x(bp)) {
3126 bnx2x_fp(bp, index, sb_index_values) =
3127 (__le16 *)status_blk.e2_sb->sb.index_values;
3128 bnx2x_fp(bp, index, sb_running_index) =
3129 (__le16 *)status_blk.e2_sb->sb.running_index;
3130 } else {
3131 bnx2x_fp(bp, index, sb_index_values) =
3132 (__le16 *)status_blk.e1x_sb->sb.index_values;
3133 bnx2x_fp(bp, index, sb_running_index) =
3134 (__le16 *)status_blk.e1x_sb->sb.running_index;
3135 }
3136 }
3138 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3139 {
3140 union host_hc_status_block *sb;
3141 struct bnx2x_fastpath *fp = &bp->fp[index];
3142 u8 cos;
3143 int ring_size = 0;
3144 int rx_ring_size = 0;
3147 if (!bp->rx_ring_size && IS_MF_ISCSI_SD(bp)) {
3148 rx_ring_size = MIN_RX_SIZE_NONTPA;
3149 bp->rx_ring_size = rx_ring_size;
3150 } else
3152 if (!bp->rx_ring_size) {
3154 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
3156 /* allocate at least number of buffers required by FW */
3157 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
3158 MIN_RX_SIZE_TPA, rx_ring_size);
3160 bp->rx_ring_size = rx_ring_size;
3161 } else /* if rx_ring_size specified - use it */
3162 rx_ring_size = bp->rx_ring_size;
3165 sb = &bnx2x_fp(bp, index, status_blk);
3167 if (!IS_FCOE_IDX(index)) {
3170 if (!CHIP_IS_E1x(bp))
3171 BNX2X_PCI_ALLOC(sb->e2_sb,
3172 &bnx2x_fp(bp, index, status_blk_mapping),
3173 sizeof(struct host_hc_status_block_e2));
3174 else
3175 BNX2X_PCI_ALLOC(sb->e1x_sb,
3176 &bnx2x_fp(bp, index, status_blk_mapping),
3177 sizeof(struct host_hc_status_block_e1x));
3178 }
3182 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
3183 * set shortcuts for it.
3184 */
3185 if (!IS_FCOE_IDX(index))
3186 set_sb_shortcuts(bp, index);
3189 if (!skip_tx_queue(bp, index)) {
3190 /* fastpath tx rings: tx_buf tx_desc */
3191 for_each_cos_in_tx_queue(fp, cos) {
3192 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
3194 DP(BNX2X_MSG_SP, "allocating tx memory of "
3195 "fp %d cos %d\n",
3196 index, cos);
3198 BNX2X_ALLOC(txdata->tx_buf_ring,
3199 sizeof(struct sw_tx_bd) * NUM_TX_BD);
3200 BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
3201 &txdata->tx_desc_mapping,
3202 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3203 }
3204 }
3207 if (!skip_rx_queue(bp, index)) {
3208 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3209 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
3210 sizeof(struct sw_rx_bd) * NUM_RX_BD);
3211 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
3212 &bnx2x_fp(bp, index, rx_desc_mapping),
3213 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3215 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
3216 &bnx2x_fp(bp, index, rx_comp_mapping),
3217 sizeof(struct eth_fast_path_rx_cqe) *
3218 NUM_RCQ_BD);
3221 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
3222 sizeof(struct sw_rx_page) * NUM_RX_SGE);
3223 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
3224 &bnx2x_fp(bp, index, rx_sge_mapping),
3225 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3227 bnx2x_set_next_page_rx_bd(fp);
3230 bnx2x_set_next_page_rx_cq(fp);
3233 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
3234 if (ring_size < rx_ring_size)
3235 goto alloc_mem_err;
3236 }
3238 return 0;
3240 /* handles low memory cases */
3241 alloc_mem_err:
3242 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
3243 index, ring_size);
3244 /* FW will drop all packets if queue is not big enough.
3245 * In these cases we disable the queue.
3246 * Min size is different for OOO, TPA and non-TPA queues.
3247 */
3248 if (ring_size < (fp->disable_tpa ?
3249 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
3250 /* release memory allocated for this queue */
3251 bnx2x_free_fp_mem_at(bp, index);
3252 return -ENOMEM;
3253 }
3254 return 0;
3255 }
3257 int bnx2x_alloc_fp_mem(struct bnx2x *bp)
3261 /*
3262 * 1. Allocate FP for leading - fatal if error
3263 * 2. {CNIC} Allocate FCoE FP - fatal if error
3264 * 3. {CNIC} Allocate OOO + FWD - disable OOO if error
3265 * 4. Allocate RSS - fix number of queues if error
3266 */
3269 if (bnx2x_alloc_fp_mem_at(bp, 0))
3270 return -ENOMEM;
3275 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
3276 /* we will fail the load process instead of marking
3277 * NO_FCOE_FLAG
3278 */
3279 return -ENOMEM;
3283 for_each_nondefault_eth_queue(bp, i)
3284 if (bnx2x_alloc_fp_mem_at(bp, i))
3285 break;
3287 /* handle memory failures */
3288 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
3289 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
3293 /*
3294 * move non eth FPs next to last eth FP
3295 * must be done in that order
3296 * FCOE_IDX < FWD_IDX < OOO_IDX
3297 */
3299 /* move FCoE fp even if NO_FCOE_FLAG is on */
3300 bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);
3302 bp->num_queues -= delta;
3303 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
3304 bp->num_queues + delta, bp->num_queues);
3305 }
3307 return 0;
3308 }
3310 void bnx2x_free_mem_bp(struct bnx2x *bp)
3311 {
3312 kfree(bp->fp);
3313 kfree(bp->msix_table);
3314 kfree(bp->ilt);
3315 }
3317 int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
3318 {
3319 struct bnx2x_fastpath *fp;
3320 struct msix_entry *tbl;
3321 struct bnx2x_ilt *ilt;
3322 int msix_table_size = 0;
3325 * The biggest MSI-X table we might need is as a maximum number of fast
3326 * path IGU SBs plus default SB (for PF).
3328 msix_table_size = bp->igu_sb_cnt + 1;
3330 /* fp array: RSS plus CNIC related L2 queues */
3331 fp = kcalloc(BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE,
3332 sizeof(*fp), GFP_KERNEL);
3333 if (!fp)
3334 goto alloc_err;
3335 bp->fp = fp;
3338 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
3339 if (!tbl)
3340 goto alloc_err;
3341 bp->msix_table = tbl;
3344 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
3345 if (!ilt)
3346 goto alloc_err;
3347 bp->ilt = ilt;
3348 return 0;
3350 alloc_err:
3351 bnx2x_free_mem_bp(bp);
3352 return -ENOMEM;
3353 }
3356 int bnx2x_reload_if_running(struct net_device *dev)
3357 {
3358 struct bnx2x *bp = netdev_priv(dev);
3360 if (unlikely(!netif_running(dev)))
3361 return 0;
3363 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
3364 return bnx2x_nic_load(bp, LOAD_NORMAL);
3365 }
3367 int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
3368 {
3369 u32 sel_phy_idx = 0;
3370 if (bp->link_params.num_phys <= 1)
3371 return INT_PHY;
3373 if (bp->link_vars.link_up) {
3374 sel_phy_idx = EXT_PHY1;
3375 /* In case link is SERDES, check if the EXT_PHY2 is the one */
3376 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
3377 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
3378 sel_phy_idx = EXT_PHY2;
3379 } else {
3381 switch (bnx2x_phy_selection(&bp->link_params)) {
3382 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
3383 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
3384 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
3385 sel_phy_idx = EXT_PHY1;
3386 break;
3387 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
3388 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
3389 sel_phy_idx = EXT_PHY2;
3390 break;
3391 }
3392 }
3394 return sel_phy_idx;
3395 }
3397 int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
3398 {
3399 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
3400 /*
3401 * The selected active PHY is always the one after swapping (in case
3402 * PHY swapping is enabled). So when swapping is enabled, we need to
3403 * reverse the configuration
3404 */
3406 if (bp->link_params.multi_phy_config &
3407 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
3408 if (sel_phy_idx == EXT_PHY1)
3409 sel_phy_idx = EXT_PHY2;
3410 else if (sel_phy_idx == EXT_PHY2)
3411 sel_phy_idx = EXT_PHY1;
3412 }
3413 return LINK_CONFIG_IDX(sel_phy_idx);
3414 }
3416 #if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
3417 int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
3418 {
3419 struct bnx2x *bp = netdev_priv(dev);
3420 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
3422 switch (type) {
3423 case NETDEV_FCOE_WWNN:
3424 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
3425 cp->fcoe_wwn_node_name_lo);
3426 break;
3427 case NETDEV_FCOE_WWPN:
3428 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
3429 cp->fcoe_wwn_port_name_lo);
3430 break;
3431 default:
3432 return -EINVAL;
3433 }
3435 return 0;
3436 }
3437 #endif
3439 /* called with rtnl_lock */
3440 int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
3441 {
3442 struct bnx2x *bp = netdev_priv(dev);
3444 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
3445 pr_err("Handling parity error recovery. Try again later\n");
3449 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
3450 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
3451 return -EINVAL;
3453 /* This does not race with packet allocation
3454 * because the actual alloc size is
3455 * only updated as part of load
3456 */
3457 dev->mtu = new_mtu;
3459 return bnx2x_reload_if_running(dev);
3462 netdev_features_t bnx2x_fix_features(struct net_device *dev,
3463 netdev_features_t features)
3464 {
3465 struct bnx2x *bp = netdev_priv(dev);
3467 /* TPA requires Rx CSUM offloading */
3468 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa)
3469 features &= ~NETIF_F_LRO;
3471 return features;
3472 }
3474 int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
3475 {
3476 struct bnx2x *bp = netdev_priv(dev);
3477 u32 flags = bp->flags;
3478 bool bnx2x_reload = false;
3480 if (features & NETIF_F_LRO)
3481 flags |= TPA_ENABLE_FLAG;
3482 else
3483 flags &= ~TPA_ENABLE_FLAG;
3485 if (features & NETIF_F_LOOPBACK) {
3486 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
3487 bp->link_params.loopback_mode = LOOPBACK_BMAC;
3488 bnx2x_reload = true;
3491 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
3492 bp->link_params.loopback_mode = LOOPBACK_NONE;
3493 bnx2x_reload = true;
3494 }
3495 }
3497 if (flags ^ bp->flags) {
3498 bp->flags = flags;
3499 bnx2x_reload = true;
3500 }
3502 if (bnx2x_reload) {
3503 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
3504 return bnx2x_reload_if_running(dev);
3505 /* else: bnx2x_nic_load() will be called at end of recovery */
3506 }
3508 return 0;
3509 }
3511 void bnx2x_tx_timeout(struct net_device *dev)
3512 {
3513 struct bnx2x *bp = netdev_priv(dev);
3515 #ifdef BNX2X_STOP_ON_ERROR
3516 if (!bp->panic)
3517 bnx2x_panic();
3518 #endif
3520 smp_mb__before_clear_bit();
3521 set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
3522 smp_mb__after_clear_bit();
3524 /* This allows the netif to be shutdown gracefully before resetting */
3525 schedule_delayed_work(&bp->sp_rtnl_task, 0);
3528 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
3529 {
3530 struct net_device *dev = pci_get_drvdata(pdev);
3531 struct bnx2x *bp;
3533 if (!dev) {
3534 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3535 return -ENODEV;
3536 }
3537 bp = netdev_priv(dev);
3539 rtnl_lock();
3541 pci_save_state(pdev);
3543 if (!netif_running(dev)) {
3544 rtnl_unlock();
3545 return 0;
3546 }
3548 netif_device_detach(dev);
3550 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
3552 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
3554 rtnl_unlock();
3556 return 0;
3557 }
3559 int bnx2x_resume(struct pci_dev *pdev)
3560 {
3561 struct net_device *dev = pci_get_drvdata(pdev);
3562 struct bnx2x *bp;
3563 int rc;
3565 if (!dev) {
3566 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3567 return -ENODEV;
3568 }
3569 bp = netdev_priv(dev);
3571 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
3572 pr_err("Handling parity error recovery. Try again later\n");
3578 pci_restore_state(pdev);
3580 if (!netif_running(dev)) {
3581 rtnl_unlock();
3582 return 0;
3583 }
3585 bnx2x_set_power_state(bp, PCI_D0);
3586 netif_device_attach(dev);
3588 /* Since the chip was reset, clear the FW sequence number */
3590 rc = bnx2x_nic_load(bp, LOAD_OPEN);
3592 rtnl_unlock();
3594 return rc;
3595 }
3598 void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
3599 u16 cid)
3600 {
3601 /* ustorm cxt validation */
3602 cxt->ustorm_ag_context.cdu_usage =
3603 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
3604 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
3605 /* xcontext validation */
3606 cxt->xstorm_ag_context.cdu_reserved =
3607 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
3608 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
3609 }
3611 static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
3612 u8 fw_sb_id, u8 sb_index,
3613 u8 ticks)
3614 {
3616 u32 addr = BAR_CSTRORM_INTMEM +
3617 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
3618 REG_WR8(bp, addr, ticks);
3619 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
3620 port, fw_sb_id, sb_index, ticks);
3623 static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
3624 u16 fw_sb_id, u8 sb_index,
3625 u8 disable)
3626 {
3627 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
3628 u32 addr = BAR_CSTRORM_INTMEM +
3629 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
3630 u16 flags = REG_RD16(bp, addr);
3632 flags &= ~HC_INDEX_DATA_HC_ENABLED;
3633 flags |= enable_flag;
3634 REG_WR16(bp, addr, flags);
3635 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
3636 port, fw_sb_id, sb_index, disable);
3639 void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
3640 u8 sb_index, u8 disable, u16 usec)
3641 {
3642 int port = BP_PORT(bp);
3643 u8 ticks = usec / BNX2X_BTR;
3645 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
3647 disable = disable ? 1 : (usec ? 0 : 1);
3648 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
3649 }