1 /* bnx2x_cmn.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2013 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <linux/etherdevice.h>
21 #include <linux/if_vlan.h>
22 #include <linux/interrupt.h>
26 #include <net/ip6_checksum.h>
27 #include <net/busy_poll.h>
28 #include <linux/prefetch.h>
29 #include "bnx2x_cmn.h"
30 #include "bnx2x_init.h"
34 * bnx2x_move_fp - move content of the fastpath structure.
37 * @from: source FP index
38 * @to: destination FP index
40 * Makes sure the contents of the bp->fp[to].napi are kept
41 * intact. This is done by first copying the napi struct from
42 * the target to the source, and then mem-copying the entire
43 * source onto the target. Update txdata pointers and related parameters.
46 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
48 struct bnx2x_fastpath *from_fp = &bp->fp[from];
49 struct bnx2x_fastpath *to_fp = &bp->fp[to];
50 struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
51 struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
52 struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
53 struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
54 int old_max_eth_txqs, new_max_eth_txqs;
55 int old_txdata_index = 0, new_txdata_index = 0;
56 struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;
58 /* Copy the NAPI object as it has been already initialized */
59 from_fp->napi = to_fp->napi;
61 /* Move bnx2x_fastpath contents */
62 memcpy(to_fp, from_fp, sizeof(*to_fp));
65 /* Retain the tpa_info of the original `to' version as we don't want
66 * 2 FPs to contain the same tpa_info pointer.
68 to_fp->tpa_info = old_tpa_info;
70 /* move sp_objs contents as well, as their indices match fp ones */
71 memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
73 /* move fp_stats contents as well, as their indices match fp ones */
74 memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
76 /* Update txdata pointers in fp and move txdata content accordingly:
77 * Each fp consumes 'max_cos' txdata structures, so the index should be
78 * decremented by max_cos x delta.
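/* For example (hypothetical numbers): if delta = 2 ETH queues were dropped
 * and max_cos = 3, the FCoE txdata entry moves down by 3 * 2 = 6 slots.
 */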
81 old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
82 new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
84 if (from == FCOE_IDX(bp)) {
85 old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
86 new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
89 memcpy(&bp->bnx2x_txq[new_txdata_index],
90 &bp->bnx2x_txq[old_txdata_index],
91 sizeof(struct bnx2x_fp_txdata));
92 to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
96 * bnx2x_fill_fw_str - Fill buffer with FW version string.
99 * @buf: character buffer to fill with the fw name
100 * @buf_len: length of the above buffer
103 void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
106 u8 phy_fw_ver[PHY_FW_VER_LEN];
108 phy_fw_ver[0] = '\0';
109 bnx2x_get_ext_phy_fw_version(&bp->link_params,
110 phy_fw_ver, PHY_FW_VER_LEN);
111 strlcpy(buf, bp->fw_ver, buf_len);
112 snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
114 (bp->common.bc_ver & 0xff0000) >> 16,
115 (bp->common.bc_ver & 0xff00) >> 8,
116 (bp->common.bc_ver & 0xff),
117 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
119 bnx2x_vf_fill_fw_str(bp, buf, buf_len);
124 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
127 * @delta: number of eth queues which were not allocated
129 static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
131 int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
133 /* Queue pointer cannot be re-set on an fp-basis, as moving pointer
134 * backward along the array could cause memory to be overwritten
136 for (cos = 1; cos < bp->max_cos; cos++) {
137 for (i = 0; i < old_eth_num - delta; i++) {
138 struct bnx2x_fastpath *fp = &bp->fp[i];
139 int new_idx = cos * (old_eth_num - delta) + i;
141 memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
142 sizeof(struct bnx2x_fp_txdata));
143 fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
148 int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
150 /* free skb in the packet ring at pos idx
151 * return idx of last bd freed
153 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
154 u16 idx, unsigned int *pkts_compl,
155 unsigned int *bytes_compl)
157 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
158 struct eth_tx_start_bd *tx_start_bd;
159 struct eth_tx_bd *tx_data_bd;
160 struct sk_buff *skb = tx_buf->skb;
161 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
164 /* prefetch skb end pointer to speed up dev_kfree_skb() */
167 DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
168 txdata->txq_index, idx, tx_buf, skb);
171 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
172 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
173 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
175 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
176 #ifdef BNX2X_STOP_ON_ERROR
177 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
178 BNX2X_ERR("BAD nbd!\n");
182 new_cons = nbd + tx_buf->first_bd;
184 /* Get the next bd */
185 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
187 /* Skip a parse bd... */
189 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
191 /* ...and the TSO split header bd since they have no mapping */
192 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
194 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
200 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
201 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
202 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
204 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
211 (*bytes_compl) += skb->len;
214 dev_kfree_skb_any(skb);
215 tx_buf->first_bd = 0;
221 int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
223 struct netdev_queue *txq;
224 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
225 unsigned int pkts_compl = 0, bytes_compl = 0;
227 #ifdef BNX2X_STOP_ON_ERROR
228 if (unlikely(bp->panic))
232 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
233 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
234 sw_cons = txdata->tx_pkt_cons;
236 while (sw_cons != hw_cons) {
239 pkt_cons = TX_BD(sw_cons);
241 DP(NETIF_MSG_TX_DONE,
242 "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
243 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
245 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
246 &pkts_compl, &bytes_compl);
251 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
253 txdata->tx_pkt_cons = sw_cons;
254 txdata->tx_bd_cons = bd_cons;
256 /* Need to make the tx_bd_cons update visible to start_xmit()
257 * before checking for netif_tx_queue_stopped(). Without the
258 * memory barrier, there is a small possibility that
259 * start_xmit() will miss it and cause the queue to be stopped forever.
261 * On the other hand we need an rmb() here to ensure the proper
262 * ordering of bit testing in the following
263 * netif_tx_queue_stopped(txq) call.
267 if (unlikely(netif_tx_queue_stopped(txq))) {
268 /* Taking tx_lock() is needed to prevent re-enabling the queue
269 * while it's empty. This could have happened if rx_action() gets
270 * suspended in bnx2x_tx_int() after the condition before
271 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
273 * stops the queue->sees fresh tx_bd_cons->releases the queue->
274 * sends some packets consuming the whole queue again->
278 __netif_tx_lock(txq, smp_processor_id());
280 if ((netif_tx_queue_stopped(txq)) &&
281 (bp->state == BNX2X_STATE_OPEN) &&
282 (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
283 netif_tx_wake_queue(txq);
285 __netif_tx_unlock(txq);
290 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
293 u16 last_max = fp->last_max_sge;
295 if (SUB_S16(idx, last_max) > 0)
296 fp->last_max_sge = idx;
299 static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
301 struct eth_end_agg_rx_cqe *cqe)
303 struct bnx2x *bp = fp->bp;
304 u16 last_max, last_elem, first_elem;
311 /* First mark all used pages */
312 for (i = 0; i < sge_len; i++)
313 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
314 RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
316 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
317 sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
319 /* Here we assume that the last SGE index is the biggest */
320 prefetch((void *)(fp->sge_mask));
321 bnx2x_update_last_max_sge(fp,
322 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
324 last_max = RX_SGE(fp->last_max_sge);
325 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
326 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
328 /* If ring is not full */
329 if (last_elem + 1 != first_elem)
330 last_elem++;
332 /* Now update the prod */
333 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
334 if (likely(fp->sge_mask[i]))
337 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
338 delta += BIT_VEC64_ELEM_SZ;
342 fp->rx_sge_prod += delta;
343 /* clear page-end entries */
344 bnx2x_clear_sge_mask_next_elems(fp);
347 DP(NETIF_MSG_RX_STATUS,
348 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
349 fp->last_max_sge, fp->rx_sge_prod);
352 /* Get Toeplitz hash value in the skb using the value from the
353 * CQE (calculated by HW).
355 static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
356 const struct eth_fast_path_rx_cqe *cqe,
359 /* Get Toeplitz hash from CQE */
360 if ((bp->dev->features & NETIF_F_RXHASH) &&
361 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
362 enum eth_rss_hash_type htype;
364 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
365 *l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) ||
366 (htype == TCP_IPV6_HASH_TYPE);
367 return le32_to_cpu(cqe->rss_hash_result);
373 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
375 struct eth_fast_path_rx_cqe *cqe)
377 struct bnx2x *bp = fp->bp;
378 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
379 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
380 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
382 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
383 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
385 /* print error if current state != stop */
386 if (tpa_info->tpa_state != BNX2X_TPA_STOP)
387 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
389 /* Try to map an empty data buffer from the aggregation info */
390 mapping = dma_map_single(&bp->pdev->dev,
391 first_buf->data + NET_SKB_PAD,
392 fp->rx_buf_size, DMA_FROM_DEVICE);
394 * ...if it fails - move the skb from the consumer to the producer
395 * and set the current aggregation state as ERROR to drop it
396 * when TPA_STOP arrives.
399 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
400 /* Move the BD from the consumer to the producer */
401 bnx2x_reuse_rx_data(fp, cons, prod);
402 tpa_info->tpa_state = BNX2X_TPA_ERROR;
406 /* move empty data from pool to prod */
407 prod_rx_buf->data = first_buf->data;
408 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
409 /* point prod_bd to new data */
410 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
411 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
413 /* move partial skb from cons to pool (don't unmap yet) */
414 *first_buf = *cons_rx_buf;
416 /* mark bin state as START */
417 tpa_info->parsing_flags =
418 le16_to_cpu(cqe->pars_flags.flags);
419 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
420 tpa_info->tpa_state = BNX2X_TPA_START;
421 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
422 tpa_info->placement_offset = cqe->placement_offset;
423 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
424 if (fp->mode == TPA_MODE_GRO) {
425 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
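/* full_page below is the largest multiple of gro_size that fits in
 * SGE_PAGES (the integer division truncates).
 */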
426 tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
427 tpa_info->gro_size = gro_size;
430 #ifdef BNX2X_STOP_ON_ERROR
431 fp->tpa_queue_used |= (1 << queue);
432 #ifdef _ASM_GENERIC_INT_L64_H
433 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
435 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
441 /* Timestamp option length allowed for TPA aggregation:
443 * nop nop kind length echo val
445 #define TPA_TSTAMP_OPT_LEN 12
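/* 12 bytes = 1 (nop) + 1 (nop) + 1 (kind) + 1 (length) + 4 (TS value) + 4 (TS echo reply) */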
447 * bnx2x_set_gro_params - compute GRO values
450 * @parsing_flags: parsing flags from the START CQE
451 * @len_on_bd: total length of the first packet for the aggregation.
453 * @pkt_len: length of all segments
455 * Approximate value of the MSS for this aggregation, calculated using
456 * its first packet.
457 * Compute number of aggregated segments, and gso_type.
459 static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
460 u16 len_on_bd, unsigned int pkt_len,
461 u16 num_of_coalesced_segs)
463 /* TPA aggregation won't have either IP options or TCP options
464 * other than timestamp or IPv6 extension headers.
466 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
468 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
469 PRS_FLAG_OVERETH_IPV6) {
470 hdrs_len += sizeof(struct ipv6hdr);
471 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
473 hdrs_len += sizeof(struct iphdr);
474 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
477 /* Check if there was a TCP timestamp; if there is one, it will
478 * always be 12 bytes long: nop nop kind length echo val.
480 * Otherwise FW would close the aggregation.
482 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
483 hdrs_len += TPA_TSTAMP_OPT_LEN;
485 skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
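/* Illustrative example (assuming a standard 1500-byte MTU, IPv4, no
 * timestamps): len_on_bd 1514 - (14 ETH + 20 IP + 20 TCP) = 1460, the
 * usual TCP MSS.
 */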
487 /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
488 * to skb_shinfo(skb)->gso_segs
490 NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
493 static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
494 struct bnx2x_fastpath *fp, u16 index)
496 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
497 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
498 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
501 if (unlikely(page == NULL)) {
502 BNX2X_ERR("Can't alloc sge\n");
506 mapping = dma_map_page(&bp->pdev->dev, page, 0,
507 SGE_PAGES, DMA_FROM_DEVICE);
508 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
509 __free_pages(page, PAGES_PER_SGE_SHIFT);
510 BNX2X_ERR("Can't map sge\n");
515 dma_unmap_addr_set(sw_buf, mapping, mapping);
517 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
518 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
523 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
524 struct bnx2x_agg_info *tpa_info,
527 struct eth_end_agg_rx_cqe *cqe,
530 struct sw_rx_page *rx_pg, old_rx_pg;
531 u32 i, frag_len, frag_size;
532 int err, j, frag_id = 0;
533 u16 len_on_bd = tpa_info->len_on_bd;
534 u16 full_page = 0, gro_size = 0;
536 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
538 if (fp->mode == TPA_MODE_GRO) {
539 gro_size = tpa_info->gro_size;
540 full_page = tpa_info->full_page;
543 /* This is needed in order to enable forwarding support */
545 bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
546 le16_to_cpu(cqe->pkt_len),
547 le16_to_cpu(cqe->num_of_coalesced_segs));
549 #ifdef BNX2X_STOP_ON_ERROR
550 if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
551 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
553 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
559 /* Run through the SGL and compose the fragmented skb */
560 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
561 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
563 /* FW gives the indices of the SGE as if the ring is an array
564 (meaning that "next" element will consume 2 indices) */
565 if (fp->mode == TPA_MODE_GRO)
566 frag_len = min_t(u32, frag_size, (u32)full_page);
568 frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);
570 rx_pg = &fp->rx_page_ring[sge_idx];
573 /* If we fail to allocate a substitute page, we simply stop
574 where we are and drop the whole packet */
575 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
577 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
581 /* Unmap the page as we're going to pass it to the stack */
582 dma_unmap_page(&bp->pdev->dev,
583 dma_unmap_addr(&old_rx_pg, mapping),
584 SGE_PAGES, DMA_FROM_DEVICE);
585 /* Add one frag and update the appropriate fields in the skb */
586 if (fp->mode == TPA_MODE_LRO)
587 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
591 for (rem = frag_len; rem > 0; rem -= gro_size) {
592 int len = rem > gro_size ? gro_size : rem;
593 skb_fill_page_desc(skb, frag_id++,
594 old_rx_pg.page, offset, len);
596 get_page(old_rx_pg.page);
601 skb->data_len += frag_len;
602 skb->truesize += SGE_PAGES;
603 skb->len += frag_len;
605 frag_size -= frag_len;
611 static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
613 if (fp->rx_frag_size)
614 put_page(virt_to_head_page(data));
619 static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp)
621 if (fp->rx_frag_size)
622 return netdev_alloc_frag(fp->rx_frag_size);
624 return kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
628 static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
630 const struct iphdr *iph = ip_hdr(skb);
633 skb_set_transport_header(skb, sizeof(struct iphdr));
636 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
637 iph->saddr, iph->daddr, 0);
640 static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
642 struct ipv6hdr *iph = ipv6_hdr(skb);
645 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
648 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
649 &iph->saddr, &iph->daddr, 0);
652 static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
653 void (*gro_func)(struct bnx2x*, struct sk_buff*))
655 skb_set_network_header(skb, 0);
657 tcp_gro_complete(skb);
661 static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
665 if (skb_shinfo(skb)->gso_size) {
666 switch (be16_to_cpu(skb->protocol)) {
668 bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
671 bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
674 BNX2X_ERR("Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
675 be16_to_cpu(skb->protocol));
679 napi_gro_receive(&fp->napi, skb);
682 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
683 struct bnx2x_agg_info *tpa_info,
685 struct eth_end_agg_rx_cqe *cqe,
688 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
689 u8 pad = tpa_info->placement_offset;
690 u16 len = tpa_info->len_on_bd;
691 struct sk_buff *skb = NULL;
692 u8 *new_data, *data = rx_buf->data;
693 u8 old_tpa_state = tpa_info->tpa_state;
695 tpa_info->tpa_state = BNX2X_TPA_STOP;
697 /* If there was an error during the handling of the TPA_START -
698 * drop this aggregation.
700 if (old_tpa_state == BNX2X_TPA_ERROR)
703 /* Try to allocate the new data */
704 new_data = bnx2x_frag_alloc(fp);
705 /* Unmap skb in the pool anyway, as we are going to change
706 pool entry status to BNX2X_TPA_STOP even if new skb allocation fails.
708 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
709 fp->rx_buf_size, DMA_FROM_DEVICE);
710 if (likely(new_data))
711 skb = build_skb(data, fp->rx_frag_size);
714 #ifdef BNX2X_STOP_ON_ERROR
715 if (pad + len > fp->rx_buf_size) {
716 BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
717 pad, len, fp->rx_buf_size);
723 skb_reserve(skb, pad + NET_SKB_PAD);
725 skb->rxhash = tpa_info->rxhash;
726 skb->l4_rxhash = tpa_info->l4_rxhash;
728 skb->protocol = eth_type_trans(skb, bp->dev);
729 skb->ip_summed = CHECKSUM_UNNECESSARY;
731 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
732 skb, cqe, cqe_idx)) {
733 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
734 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
735 bnx2x_gro_receive(bp, fp, skb);
737 DP(NETIF_MSG_RX_STATUS,
738 "Failed to allocate new pages - dropping packet!\n");
739 dev_kfree_skb_any(skb);
742 /* put new data in bin */
743 rx_buf->data = new_data;
747 bnx2x_frag_free(fp, new_data);
749 /* drop the packet and keep the buffer in the bin */
750 DP(NETIF_MSG_RX_STATUS,
751 "Failed to allocate or map a new skb - dropping packet!\n");
752 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
755 static int bnx2x_alloc_rx_data(struct bnx2x *bp,
756 struct bnx2x_fastpath *fp, u16 index)
759 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
760 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
763 data = bnx2x_frag_alloc(fp);
764 if (unlikely(data == NULL))
767 mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
770 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
771 bnx2x_frag_free(fp, data);
772 BNX2X_ERR("Can't map rx data\n");
777 dma_unmap_addr_set(rx_buf, mapping, mapping);
779 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
780 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
786 void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
787 struct bnx2x_fastpath *fp,
788 struct bnx2x_eth_q_stats *qstats)
790 /* Do nothing if no L4 csum validation was done.
791 * We do not check whether IP csum was validated. For IPv4 we assume
792 * that if the card got as far as validating the L4 csum, it also
793 * validated the IP csum. IPv6 has no IP csum.
795 if (cqe->fast_path_cqe.status_flags &
796 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
799 /* If L4 validation was done, check if an error was found. */
801 if (cqe->fast_path_cqe.type_error_flags &
802 (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
803 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
804 qstats->hw_csum_err++;
805 else
806 skb->ip_summed = CHECKSUM_UNNECESSARY;
809 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
811 struct bnx2x *bp = fp->bp;
812 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
813 u16 sw_comp_cons, sw_comp_prod;
815 union eth_rx_cqe *cqe;
816 struct eth_fast_path_rx_cqe *cqe_fp;
818 #ifdef BNX2X_STOP_ON_ERROR
819 if (unlikely(bp->panic))
823 bd_cons = fp->rx_bd_cons;
824 bd_prod = fp->rx_bd_prod;
825 bd_prod_fw = bd_prod;
826 sw_comp_cons = fp->rx_comp_cons;
827 sw_comp_prod = fp->rx_comp_prod;
829 comp_ring_cons = RCQ_BD(sw_comp_cons);
830 cqe = &fp->rx_comp_ring[comp_ring_cons];
831 cqe_fp = &cqe->fast_path_cqe;
833 DP(NETIF_MSG_RX_STATUS,
834 "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);
836 while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
837 struct sw_rx_bd *rx_buf = NULL;
840 enum eth_rx_cqe_type cqe_fp_type;
845 #ifdef BNX2X_STOP_ON_ERROR
846 if (unlikely(bp->panic))
850 bd_prod = RX_BD(bd_prod);
851 bd_cons = RX_BD(bd_cons);
853 cqe_fp_flags = cqe_fp->type_error_flags;
854 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
856 DP(NETIF_MSG_RX_STATUS,
857 "CQE type %x err %x status %x queue %x vlan %x len %u\n",
858 CQE_TYPE(cqe_fp_flags),
859 cqe_fp_flags, cqe_fp->status_flags,
860 le32_to_cpu(cqe_fp->rss_hash_result),
861 le16_to_cpu(cqe_fp->vlan_tag),
862 le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
864 /* is this a slowpath msg? */
865 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
866 bnx2x_sp_event(fp, cqe);
870 rx_buf = &fp->rx_buf_ring[bd_cons];
873 if (!CQE_TYPE_FAST(cqe_fp_type)) {
874 struct bnx2x_agg_info *tpa_info;
875 u16 frag_size, pages;
876 #ifdef BNX2X_STOP_ON_ERROR
878 if (fp->disable_tpa &&
879 (CQE_TYPE_START(cqe_fp_type) ||
880 CQE_TYPE_STOP(cqe_fp_type)))
881 BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
882 CQE_TYPE(cqe_fp_type));
885 if (CQE_TYPE_START(cqe_fp_type)) {
886 u16 queue = cqe_fp->queue_index;
887 DP(NETIF_MSG_RX_STATUS,
888 "calling tpa_start on queue %d\n",
891 bnx2x_tpa_start(fp, queue,
897 queue = cqe->end_agg_cqe.queue_index;
898 tpa_info = &fp->tpa_info[queue];
899 DP(NETIF_MSG_RX_STATUS,
900 "calling tpa_stop on queue %d\n",
903 frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
906 if (fp->mode == TPA_MODE_GRO)
907 pages = (frag_size + tpa_info->full_page - 1) /
910 pages = SGE_PAGE_ALIGN(frag_size) >>
913 bnx2x_tpa_stop(bp, fp, tpa_info, pages,
914 &cqe->end_agg_cqe, comp_ring_cons);
915 #ifdef BNX2X_STOP_ON_ERROR
920 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
924 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
925 pad = cqe_fp->placement_offset;
926 dma_sync_single_for_cpu(&bp->pdev->dev,
927 dma_unmap_addr(rx_buf, mapping),
928 pad + RX_COPY_THRESH,
931 prefetch(data + pad); /* speed up eth_type_trans() */
932 /* is this an error packet? */
933 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
934 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
935 "ERROR flags %x rx packet %u\n",
936 cqe_fp_flags, sw_comp_cons);
937 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
941 /* Since we don't have a jumbo ring
942 * copy small packets if mtu > 1500
944 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
945 (len <= RX_COPY_THRESH)) {
946 skb = netdev_alloc_skb_ip_align(bp->dev, len);
948 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
949 "ERROR packet dropped because of alloc failure\n");
950 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
953 memcpy(skb->data, data + pad, len);
954 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
956 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
957 dma_unmap_single(&bp->pdev->dev,
958 dma_unmap_addr(rx_buf, mapping),
961 skb = build_skb(data, fp->rx_frag_size);
962 if (unlikely(!skb)) {
963 bnx2x_frag_free(fp, data);
964 bnx2x_fp_qstats(bp, fp)->
965 rx_skb_alloc_failed++;
968 skb_reserve(skb, pad);
970 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
971 "ERROR packet dropped because of alloc failure\n");
972 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
974 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
980 skb->protocol = eth_type_trans(skb, bp->dev);
982 /* Set Toeplitz hash for a non-LRO skb */
983 skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
984 skb->l4_rxhash = l4_rxhash;
986 skb_checksum_none_assert(skb);
988 if (bp->dev->features & NETIF_F_RXCSUM)
989 bnx2x_csum_validate(skb, cqe, fp,
990 bnx2x_fp_qstats(bp, fp));
992 skb_record_rx_queue(skb, fp->rx_queue);
994 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
996 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
997 le16_to_cpu(cqe_fp->vlan_tag));
999 skb_mark_napi_id(skb, &fp->napi);
1001 if (bnx2x_fp_ll_polling(fp))
1002 netif_receive_skb(skb);
1004 napi_gro_receive(&fp->napi, skb);
1006 rx_buf->data = NULL;
1008 bd_cons = NEXT_RX_IDX(bd_cons);
1009 bd_prod = NEXT_RX_IDX(bd_prod);
1010 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1013 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1014 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1016 /* mark CQE as free */
1017 BNX2X_SEED_CQE(cqe_fp);
1019 if (rx_pkt == budget)
1022 comp_ring_cons = RCQ_BD(sw_comp_cons);
1023 cqe = &fp->rx_comp_ring[comp_ring_cons];
1024 cqe_fp = &cqe->fast_path_cqe;
1027 fp->rx_bd_cons = bd_cons;
1028 fp->rx_bd_prod = bd_prod_fw;
1029 fp->rx_comp_cons = sw_comp_cons;
1030 fp->rx_comp_prod = sw_comp_prod;
1032 /* Update producers */
1033 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1036 fp->rx_pkt += rx_pkt;
1042 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1044 struct bnx2x_fastpath *fp = fp_cookie;
1045 struct bnx2x *bp = fp->bp;
1049 "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
1050 fp->index, fp->fw_sb_id, fp->igu_sb_id);
1052 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1054 #ifdef BNX2X_STOP_ON_ERROR
1055 if (unlikely(bp->panic))
1059 /* Handle Rx and Tx according to MSI-X vector */
1060 for_each_cos_in_tx_queue(fp, cos)
1061 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1063 prefetch(&fp->sb_running_index[SM_RX_ID]);
1064 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1069 /* HW Lock for shared dual port PHYs */
1070 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1072 mutex_lock(&bp->port.phy_mutex);
1074 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1077 void bnx2x_release_phy_lock(struct bnx2x *bp)
1079 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1081 mutex_unlock(&bp->port.phy_mutex);
1084 /* calculates MF speed according to current line speed and MF configuration */
1085 u16 bnx2x_get_mf_speed(struct bnx2x *bp)
1087 u16 line_speed = bp->link_vars.line_speed;
1089 u16 maxCfg = bnx2x_extract_max_cfg(bp,
1090 bp->mf_config[BP_VN(bp)]);
1092 /* Calculate the current MAX line speed limit for the MF
1093 * devices
1094 */
1095 if (IS_MF_SI(bp))
1096 line_speed = (line_speed * maxCfg) / 100;
1097 else { /* SD mode */
1098 u16 vn_max_rate = maxCfg * 100;
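/* maxCfg is given in 100 Mbps units here, so vn_max_rate is in Mbps and
 * directly comparable with line_speed.
 */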
1100 if (vn_max_rate < line_speed)
1101 line_speed = vn_max_rate;
1109 * bnx2x_fill_report_data - fill link report data to report
1111 * @bp: driver handle
1112 * @data: link state to update
1114 * It uses non-atomic bit operations because it is called under the mutex.
1116 static void bnx2x_fill_report_data(struct bnx2x *bp,
1117 struct bnx2x_link_report_data *data)
1119 u16 line_speed = bnx2x_get_mf_speed(bp);
1121 memset(data, 0, sizeof(*data));
1123 /* Fill the report data: effective line speed */
1124 data->line_speed = line_speed;
1127 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1128 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1129 &data->link_report_flags);
1132 if (bp->link_vars.duplex == DUPLEX_FULL)
1133 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
1135 /* Rx Flow Control is ON */
1136 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1137 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
1139 /* Tx Flow Control is ON */
1140 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1141 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
1145 * bnx2x_link_report - report link status to OS.
1147 * @bp: driver handle
1149 * Calls the __bnx2x_link_report() under the same locking scheme
1150 * as the link/PHY state managing code to ensure consistent link reporting.
1154 void bnx2x_link_report(struct bnx2x *bp)
1156 bnx2x_acquire_phy_lock(bp);
1157 __bnx2x_link_report(bp);
1158 bnx2x_release_phy_lock(bp);
1162 * __bnx2x_link_report - report link status to OS.
1164 * @bp: driver handle
1166 * Non-atomic implementation.
1167 * Should be called under the phy_lock.
1169 void __bnx2x_link_report(struct bnx2x *bp)
1171 struct bnx2x_link_report_data cur_data;
1174 if (IS_PF(bp) && !CHIP_IS_E1(bp))
1175 bnx2x_read_mf_cfg(bp);
1177 /* Read the current link report info */
1178 bnx2x_fill_report_data(bp, &cur_data);
1180 /* Don't report link down or exactly the same link status twice */
1181 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1182 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1183 &bp->last_reported_link.link_report_flags) &&
1184 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1185 &cur_data.link_report_flags)))
1190 /* We are going to report new link parameters now -
1191 * remember the current data for the next time.
1193 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1195 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1196 &cur_data.link_report_flags)) {
1197 netif_carrier_off(bp->dev);
1198 netdev_err(bp->dev, "NIC Link is Down\n");
1204 netif_carrier_on(bp->dev);
1206 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1207 &cur_data.link_report_flags))
1212 /* Handle the FC at the end so that only these flags would be
1213 * possibly set. This way we may easily check if there is no FC enabled.
1216 if (cur_data.link_report_flags) {
1217 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1218 &cur_data.link_report_flags)) {
1219 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1220 &cur_data.link_report_flags))
1221 flow = "ON - receive & transmit";
1223 flow = "ON - receive";
1225 flow = "ON - transmit";
1230 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1231 cur_data.line_speed, duplex, flow);
1235 static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1239 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1240 struct eth_rx_sge *sge;
1242 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1244 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1245 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1248 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1249 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1253 static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1254 struct bnx2x_fastpath *fp, int last)
1258 for (i = 0; i < last; i++) {
1259 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1260 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1261 u8 *data = first_buf->data;
1264 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1267 if (tpa_info->tpa_state == BNX2X_TPA_START)
1268 dma_unmap_single(&bp->pdev->dev,
1269 dma_unmap_addr(first_buf, mapping),
1270 fp->rx_buf_size, DMA_FROM_DEVICE);
1271 bnx2x_frag_free(fp, data);
1272 first_buf->data = NULL;
1276 void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1280 for_each_rx_queue_cnic(bp, j) {
1281 struct bnx2x_fastpath *fp = &bp->fp[j];
1285 /* Activate BD ring */
1286 /* Warning!
1287 * this will generate an interrupt (to the TSTORM);
1288 * it must only be done after the chip is initialized
1290 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1295 void bnx2x_init_rx_rings(struct bnx2x *bp)
1297 int func = BP_FUNC(bp);
1301 /* Allocate TPA resources */
1302 for_each_eth_queue(bp, j) {
1303 struct bnx2x_fastpath *fp = &bp->fp[j];
1306 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1308 if (!fp->disable_tpa) {
1309 /* Fill the per-aggregation pool */
1310 for (i = 0; i < MAX_AGG_QS(bp); i++) {
1311 struct bnx2x_agg_info *tpa_info =
1313 struct sw_rx_bd *first_buf =
1314 &tpa_info->first_buf;
1316 first_buf->data = bnx2x_frag_alloc(fp);
1317 if (!first_buf->data) {
1318 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1320 bnx2x_free_tpa_pool(bp, fp, i);
1321 fp->disable_tpa = 1;
1324 dma_unmap_addr_set(first_buf, mapping, 0);
1325 tpa_info->tpa_state = BNX2X_TPA_STOP;
1328 /* "next page" elements initialization */
1329 bnx2x_set_next_page_sgl(fp);
1331 /* set SGEs bit mask */
1332 bnx2x_init_sge_ring_bit_mask(fp);
1334 /* Allocate SGEs and initialize the ring elements */
1335 for (i = 0, ring_prod = 0;
1336 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1338 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
1339 BNX2X_ERR("was only able to allocate %d rx sges\n",
1341 BNX2X_ERR("disabling TPA for queue[%d]\n",
1343 /* Cleanup already allocated elements */
1344 bnx2x_free_rx_sge_range(bp, fp,
1346 bnx2x_free_tpa_pool(bp, fp,
1348 fp->disable_tpa = 1;
1352 ring_prod = NEXT_SGE_IDX(ring_prod);
1355 fp->rx_sge_prod = ring_prod;
1359 for_each_eth_queue(bp, j) {
1360 struct bnx2x_fastpath *fp = &bp->fp[j];
1364 /* Activate BD ring */
1365 /* Warning!
1366 * this will generate an interrupt (to the TSTORM);
1367 * it must only be done after the chip is initialized
1369 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1375 if (CHIP_IS_E1(bp)) {
1376 REG_WR(bp, BAR_USTRORM_INTMEM +
1377 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1378 U64_LO(fp->rx_comp_mapping));
1379 REG_WR(bp, BAR_USTRORM_INTMEM +
1380 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1381 U64_HI(fp->rx_comp_mapping));
1386 static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1389 struct bnx2x *bp = fp->bp;
1391 for_each_cos_in_tx_queue(fp, cos) {
1392 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1393 unsigned pkts_compl = 0, bytes_compl = 0;
1395 u16 sw_prod = txdata->tx_pkt_prod;
1396 u16 sw_cons = txdata->tx_pkt_cons;
1398 while (sw_cons != sw_prod) {
1399 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1400 &pkts_compl, &bytes_compl);
1404 netdev_tx_reset_queue(
1405 netdev_get_tx_queue(bp->dev,
1406 txdata->txq_index));
1410 static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1414 for_each_tx_queue_cnic(bp, i) {
1415 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1419 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1423 for_each_eth_queue(bp, i) {
1424 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1428 static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1430 struct bnx2x *bp = fp->bp;
1433 /* ring wasn't allocated */
1434 if (fp->rx_buf_ring == NULL)
1437 for (i = 0; i < NUM_RX_BD; i++) {
1438 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1439 u8 *data = rx_buf->data;
1443 dma_unmap_single(&bp->pdev->dev,
1444 dma_unmap_addr(rx_buf, mapping),
1445 fp->rx_buf_size, DMA_FROM_DEVICE);
1447 rx_buf->data = NULL;
1448 bnx2x_frag_free(fp, data);
1452 static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1456 for_each_rx_queue_cnic(bp, j) {
1457 bnx2x_free_rx_bds(&bp->fp[j]);
1461 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1465 for_each_eth_queue(bp, j) {
1466 struct bnx2x_fastpath *fp = &bp->fp[j];
1468 bnx2x_free_rx_bds(fp);
1470 if (!fp->disable_tpa)
1471 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1475 void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1477 bnx2x_free_tx_skbs_cnic(bp);
1478 bnx2x_free_rx_skbs_cnic(bp);
1481 void bnx2x_free_skbs(struct bnx2x *bp)
1483 bnx2x_free_tx_skbs(bp);
1484 bnx2x_free_rx_skbs(bp);
1487 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1489 /* load old values */
1490 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1492 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1493 /* leave all but MAX value */
1494 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1496 /* set new MAX value */
1497 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1498 & FUNC_MF_CFG_MAX_BW_MASK;
1500 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1505 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1507 * @bp: driver handle
1508 * @nvecs: number of vectors to be released
1510 static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1514 if (nvecs == offset)
1517 /* VFs don't have a default SB */
1519 free_irq(bp->msix_table[offset].vector, bp->dev);
1520 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1521 bp->msix_table[offset].vector);
1525 if (CNIC_SUPPORT(bp)) {
1526 if (nvecs == offset)
1531 for_each_eth_queue(bp, i) {
1532 if (nvecs == offset)
1534 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1535 i, bp->msix_table[offset].vector);
1537 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1541 void bnx2x_free_irq(struct bnx2x *bp)
1543 if (bp->flags & USING_MSIX_FLAG &&
1544 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1545 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1547 /* vfs don't have a default status block */
1551 bnx2x_free_msix_irqs(bp, nvecs);
1553 free_irq(bp->dev->irq, bp->dev);
1557 int bnx2x_enable_msix(struct bnx2x *bp)
1559 int msix_vec = 0, i, rc;
1561 /* VFs don't have a default status block */
1563 bp->msix_table[msix_vec].entry = msix_vec;
1564 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1565 bp->msix_table[0].entry);
1569 /* Cnic requires an msix vector for itself */
1570 if (CNIC_SUPPORT(bp)) {
1571 bp->msix_table[msix_vec].entry = msix_vec;
1572 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1573 msix_vec, bp->msix_table[msix_vec].entry);
1577 /* We need separate vectors for ETH queues only (not FCoE) */
1578 for_each_eth_queue(bp, i) {
1579 bp->msix_table[msix_vec].entry = msix_vec;
1580 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1581 msix_vec, msix_vec, i);
1585 DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1588 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], msix_vec);
1591 /* reconfigure number of tx/rx queues according to available MSI-X vectors */
1594 if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) {
1595 /* how many fewer vectors will we have? */
1596 int diff = msix_vec - rc;
1598 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1600 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1603 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1607 /* decrease number of queues by number of unallocated entries */
1609 bp->num_ethernet_queues -= diff;
1610 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1612 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1614 } else if (rc > 0) {
1615 /* Get by with single vector */
1616 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
1618 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1623 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1624 bp->flags |= USING_SINGLE_MSIX_FLAG;
1626 BNX2X_DEV_INFO("set number of queues to 1\n");
1627 bp->num_ethernet_queues = 1;
1628 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1629 } else if (rc < 0) {
1630 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1634 bp->flags |= USING_MSIX_FLAG;
1639 /* fall back to INTx if not enough memory */
1641 bp->flags |= DISABLE_MSI_FLAG;
1646 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1648 int i, rc, offset = 0;
1650 /* no default status block for vf */
1652 rc = request_irq(bp->msix_table[offset++].vector,
1653 bnx2x_msix_sp_int, 0,
1654 bp->dev->name, bp->dev);
1656 BNX2X_ERR("request sp irq failed\n");
1661 if (CNIC_SUPPORT(bp))
1664 for_each_eth_queue(bp, i) {
1665 struct bnx2x_fastpath *fp = &bp->fp[i];
1666 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1669 rc = request_irq(bp->msix_table[offset].vector,
1670 bnx2x_msix_fp_int, 0, fp->name, fp);
1672 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1673 bp->msix_table[offset].vector, rc);
1674 bnx2x_free_msix_irqs(bp, offset);
1681 i = BNX2X_NUM_ETH_QUEUES(bp);
1683 offset = 1 + CNIC_SUPPORT(bp);
1684 netdev_info(bp->dev,
1685 "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1686 bp->msix_table[0].vector,
1687 0, bp->msix_table[offset].vector,
1688 i - 1, bp->msix_table[offset + i - 1].vector);
1690 offset = CNIC_SUPPORT(bp);
1691 netdev_info(bp->dev,
1692 "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
1693 0, bp->msix_table[offset].vector,
1694 i - 1, bp->msix_table[offset + i - 1].vector);
1699 int bnx2x_enable_msi(struct bnx2x *bp)
1703 rc = pci_enable_msi(bp->pdev);
1705 BNX2X_DEV_INFO("MSI is not attainable\n");
1708 bp->flags |= USING_MSI_FLAG;
1713 static int bnx2x_req_irq(struct bnx2x *bp)
1715 unsigned long flags;
1718 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1721 flags = IRQF_SHARED;
1723 if (bp->flags & USING_MSIX_FLAG)
1724 irq = bp->msix_table[0].vector;
1726 irq = bp->pdev->irq;
1728 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1731 static int bnx2x_setup_irqs(struct bnx2x *bp)
1734 if (bp->flags & USING_MSIX_FLAG &&
1735 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1736 rc = bnx2x_req_msix_irqs(bp);
1740 rc = bnx2x_req_irq(bp);
1742 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1745 if (bp->flags & USING_MSI_FLAG) {
1746 bp->dev->irq = bp->pdev->irq;
1747 netdev_info(bp->dev, "using MSI IRQ %d\n",
1750 if (bp->flags & USING_MSIX_FLAG) {
1751 bp->dev->irq = bp->msix_table[0].vector;
1752 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1760 static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1764 for_each_rx_queue_cnic(bp, i) {
1765 bnx2x_fp_init_lock(&bp->fp[i]);
1766 napi_enable(&bnx2x_fp(bp, i, napi));
1770 static void bnx2x_napi_enable(struct bnx2x *bp)
1774 for_each_eth_queue(bp, i) {
1775 bnx2x_fp_init_lock(&bp->fp[i]);
1776 napi_enable(&bnx2x_fp(bp, i, napi));
1780 static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1785 for_each_rx_queue_cnic(bp, i) {
1786 napi_disable(&bnx2x_fp(bp, i, napi));
1787 while (!bnx2x_fp_lock_napi(&bp->fp[i]))
1793 static void bnx2x_napi_disable(struct bnx2x *bp)
1798 for_each_eth_queue(bp, i) {
1799 napi_disable(&bnx2x_fp(bp, i, napi));
1800 while (!bnx2x_fp_lock_napi(&bp->fp[i]))
1806 void bnx2x_netif_start(struct bnx2x *bp)
1808 if (netif_running(bp->dev)) {
1809 bnx2x_napi_enable(bp);
1810 if (CNIC_LOADED(bp))
1811 bnx2x_napi_enable_cnic(bp);
1812 bnx2x_int_enable(bp);
1813 if (bp->state == BNX2X_STATE_OPEN)
1814 netif_tx_wake_all_queues(bp->dev);
1818 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1820 bnx2x_int_disable_sync(bp, disable_hw);
1821 bnx2x_napi_disable(bp);
1822 if (CNIC_LOADED(bp))
1823 bnx2x_napi_disable_cnic(bp);
1826 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1828 struct bnx2x *bp = netdev_priv(dev);
1830 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
1831 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1832 u16 ether_type = ntohs(hdr->h_proto);
1834 /* Skip VLAN tag if present */
1835 if (ether_type == ETH_P_8021Q) {
1836 struct vlan_ethhdr *vhdr =
1837 (struct vlan_ethhdr *)skb->data;
1839 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1842 /* If ethertype is FCoE or FIP - use FCoE ring */
1843 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1844 return bnx2x_fcoe_tx(bp, txq_index);
1847 /* select a non-FCoE queue */
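/* The stack's pick is folded (modulo) into the ETH L2 queue range so that
 * regular traffic never lands on the dedicated FCoE ring.
 */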
1848 return __netdev_pick_tx(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
1851 void bnx2x_set_num_queues(struct bnx2x *bp)
1854 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
1856 /* override in STORAGE SD modes */
1857 if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
1858 bp->num_ethernet_queues = 1;
1860 /* Add special queues */
1861 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1862 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1864 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
1868 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1870 * @bp: Driver handle
1872 * We currently support at most 16 Tx queues for each CoS, thus we will
1873 * allocate a multiple of 16 for ETH L2 rings, according to the value of
1874 * bp->max_cos.
1876 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1877 * index after all ETH L2 indices.
1879 * If the actual number of Tx queues (for each CoS) is less than 16 then there
1880 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1881 * 16..31,...) with indices that are not coupled with any real Tx queue.
1883 * The proper configuration of skb->queue_mapping is handled by
1884 * bnx2x_select_queue() and __skb_tx_hash().
1886 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1887 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1889 static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
1893 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
1894 rx = BNX2X_NUM_ETH_QUEUES(bp);
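/* For instance (hypothetical numbers): 8 ETH queues with max_cos = 3 give
 * tx = 24 and rx = 8, plus one Tx and one Rx queue below if FCoE is used.
 */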
1896 /* account for fcoe queue */
1897 if (include_cnic && !NO_FCOE(bp)) {
1902 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1904 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1907 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1909 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1913 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
1919 static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1923 for_each_queue(bp, i) {
1924 struct bnx2x_fastpath *fp = &bp->fp[i];
1927 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1930 * Although there are no IP frames expected to arrive on
1931 * this ring we still want to add an
1932 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer overrun.
1935 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
1938 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
1939 IP_HEADER_ALIGNMENT_PADDING +
1940 ETH_OVREHEAD +
1941 mtu +
1942 BNX2X_FW_RX_ALIGN_END;
1943 /* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
1944 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
1945 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
1947 fp->rx_frag_size = 0;
1951 static int bnx2x_init_rss(struct bnx2x *bp)
1954 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1956 /* Prepare the initial contents of the indirection table if RSS is enabled */
1959 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
1960 bp->rss_conf_obj.ind_table[i] =
1962 ethtool_rxfh_indir_default(i, num_eth_queues);
1965 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
1966 * per-port, so if explicit configuration is needed, do it only for a PMF.
1969 * For 57712 and newer, on the other hand, it's a per-function configuration.
1972 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
1975 int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
1976 bool config_hash, bool enable)
1978 struct bnx2x_config_rss_params params = {NULL};
1980 /* Although RSS is meaningless when there is a single HW queue we
1981 * still need it enabled in order to have HW Rx hash generated.
1983 * if (!is_eth_multi(bp))
1984 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
1987 params.rss_obj = rss_obj;
1989 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
1992 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
1994 /* RSS configuration */
1995 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
1996 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
1997 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
1998 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
1999 if (rss_obj->udp_rss_v4)
2000 __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
2001 if (rss_obj->udp_rss_v6)
2002 __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
2004 __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
2008 params.rss_result_mask = MULTI_MASK;
2010 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
2014 prandom_bytes(params.rss_key, T_ETH_RSS_KEY * 4);
2015 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
2019 return bnx2x_config_rss(bp, &params);
2021 return bnx2x_vfpf_config_rss(bp, &params);
2024 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
2026 struct bnx2x_func_state_params func_params = {NULL};
2028 /* Prepare parameters for function state transitions */
2029 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2031 func_params.f_obj = &bp->func_obj;
2032 func_params.cmd = BNX2X_F_CMD_HW_INIT;
2034 func_params.params.hw_init.load_phase = load_code;
2036 return bnx2x_func_state_change(bp, &func_params);
2040 * Cleans the objects that have internal lists without sending
2041 * ramrods. Should be run when interrupts are disabled.
2043 void bnx2x_squeeze_objects(struct bnx2x *bp)
2046 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
2047 struct bnx2x_mcast_ramrod_params rparam = {NULL};
2048 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
2050 /***************** Cleanup MACs' object first *************************/
2052 /* Wait for completion of requested */
2053 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2054 /* Perform a dry cleanup */
2055 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2057 /* Clean ETH primary MAC */
2058 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
2059 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
2062 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2064 /* Cleanup UC list */
2066 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2067 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2070 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2072 /***************** Now clean mcast object *****************************/
2073 rparam.mcast_obj = &bp->mcast_obj;
2074 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2076 /* Add a DEL command... - Since we're doing a driver cleanup only,
2077 * we take a lock surrounding both the initial send and the CONTs,
2078 * as we don't want a true completion to disrupt us in the middle.
2080 netif_addr_lock_bh(bp->dev);
2081 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2083 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2086 /* ...and wait until all pending commands are cleared */
2087 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2090 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2092 netif_addr_unlock_bh(bp->dev);
2096 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2098 netif_addr_unlock_bh(bp->dev);
2101 #ifndef BNX2X_STOP_ON_ERROR
2102 #define LOAD_ERROR_EXIT(bp, label) \
2104 (bp)->state = BNX2X_STATE_ERROR; \
2108 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2110 bp->cnic_loaded = false; \
2113 #else /*BNX2X_STOP_ON_ERROR*/
2114 #define LOAD_ERROR_EXIT(bp, label) \
2116 (bp)->state = BNX2X_STATE_ERROR; \
2120 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2122 bp->cnic_loaded = false; \
2126 #endif /*BNX2X_STOP_ON_ERROR*/
2128 static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2130 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2131 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2135 static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
2137 int num_groups, vf_headroom = 0;
2138 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
2140 /* number of queues for statistics is number of eth queues + FCoE */
2141 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
2143 /* Total number of FW statistics requests =
2144 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2145 * and fcoe l2 queue) stats + num of queues (which includes another 1
2146 * for fcoe l2 queue if applicable)
2148 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
2150 /* vf stats appear in the request list, but their data is allocated by
2151 * the VFs themselves. We don't include them in the bp->fw_stats_num as
2152 * it is used to determine where to place the vf stats queries in the request struct.
2156 vf_headroom = bnx2x_vf_headroom(bp);
2158 /* Request is built from stats_query_header and an array of
2159 * stats_query_cmd_group each of which contains
2160 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2161 * configured in the stats_query_header.
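/* (i.e. num_groups below is (fw_stats_num + vf_headroom) divided by
 * STATS_QUERY_CMD_COUNT, rounded up.)
 */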
2164 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2165 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
2168 DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2169 bp->fw_stats_num, vf_headroom, num_groups);
2170 bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2171 num_groups * sizeof(struct stats_query_cmd_group);
2173 /* Data for statistics requests + stats_counter
2174 * stats_counter holds per-STORM counters that are incremented
2175 * when STORM has finished with the current request.
2176 * memory for FCoE offloaded statistics is counted anyway,
2177 * even if they will not be sent.
2178 * VF stats are not accounted for here as the data of VF stats is stored
2179 * in memory allocated by the VF, not here.
2181 bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2182 sizeof(struct per_pf_stats) +
2183 sizeof(struct fcoe_statistics_params) +
2184 sizeof(struct per_queue_stats) * num_queue_stats +
2185 sizeof(struct stats_counter);
2187 BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
2188 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
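/* Layout of this single DMA allocation: the request structure comes first,
 * immediately followed by the data area; the pointers below are carved out
 * of the same block.
 */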
2191 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2192 bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2193 bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2194 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2195 bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2196 bp->fw_stats_req_sz;
2198 DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
2199 U64_HI(bp->fw_stats_req_mapping),
2200 U64_LO(bp->fw_stats_req_mapping));
2201 DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
2202 U64_HI(bp->fw_stats_data_mapping),
2203 U64_LO(bp->fw_stats_data_mapping));
2207 bnx2x_free_fw_stats_mem(bp);
2208 BNX2X_ERR("Can't allocate FW stats memory\n");
2212 /* send load request to mcp and analyze response */
2213 static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2219 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2220 DRV_MSG_SEQ_NUMBER_MASK);
2221 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2223 /* Get current FW pulse sequence */
2224 bp->fw_drv_pulse_wr_seq =
2225 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2226 DRV_PULSE_SEQ_MASK);
2227 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2229 param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2231 if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2232 param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2235 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
2237 /* if mcp fails to respond we must abort */
2238 if (!(*load_code)) {
2239 BNX2X_ERR("MCP response failure, aborting\n");
2243 /* If mcp refused (e.g. other port is in diagnostic mode) we must abort */
2246 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2247 BNX2X_ERR("MCP refused load request, aborting\n");
2253 /* check whether another PF has already loaded FW to chip. In
2254 * virtualized environments a pf from another VM may have already
2255 * initialized the device including loading FW
2257 int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code)
2259 /* is another pf loaded on this engine? */
2260 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2261 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2262 /* build my FW version dword */
2263 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2264 (BCM_5710_FW_MINOR_VERSION << 8) +
2265 (BCM_5710_FW_REVISION_VERSION << 16) +
2266 (BCM_5710_FW_ENGINEERING_VERSION << 24);
2268 /* read loaded FW from chip */
2269 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2271 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2274 /* abort nic load if version mismatch */
2275 if (my_fw != loaded_fw) {
2276 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
2284 /* returns the "mcp load_code" according to global load_count array */
2285 static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2287 int path = BP_PATH(bp);
2289 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
2290 path, load_count[path][0], load_count[path][1],
2291 load_count[path][2]);
2292 load_count[path][0]++;
2293 load_count[path][1 + port]++;
2294 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
2295 path, load_count[path][0], load_count[path][1],
2296 load_count[path][2]);
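/* No-MCP bookkeeping in short: load_count[path][0] counts functions loaded
 * on this path and load_count[path][1 + port] counts functions on this
 * port, so the first function up gets LOAD_COMMON, the first one on a port
 * gets LOAD_PORT and every other function gets LOAD_FUNCTION.
 */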
2297 if (load_count[path][0] == 1)
2298 return FW_MSG_CODE_DRV_LOAD_COMMON;
2299 else if (load_count[path][1 + port] == 1)
2300 return FW_MSG_CODE_DRV_LOAD_PORT;
2302 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2305 /* mark PMF if applicable */
2306 static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2308 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2309 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2310 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2312 /* We need the barrier to ensure the ordering between the
2313 * writing to bp->port.pmf here and reading it from the
2314 * bnx2x_periodic_task().
2321 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2324 static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2326 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2327 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2328 (bp->common.shmem2_base)) {
2329 if (SHMEM2_HAS(bp, dcc_support))
2330 SHMEM2_WR(bp, dcc_support,
2331 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2332 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2333 if (SHMEM2_HAS(bp, afex_driver_support))
2334 SHMEM2_WR(bp, afex_driver_support,
2335 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2338 /* Set AFEX default VLAN tag to an invalid value */
2339 bp->afex_def_vlan_tag = -1;
2343 * bnx2x_bz_fp - zero content of the fastpath structure.
2345 * @bp: driver handle
2346 * @index: fastpath index to be zeroed
2348 * Makes sure the contents of the bp->fp[index].napi are kept
2351 static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2353 struct bnx2x_fastpath *fp = &bp->fp[index];
2355 struct napi_struct orig_napi = fp->napi;
2356 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
2358 /* bzero bnx2x_fastpath contents */
2360 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2361 sizeof(struct bnx2x_agg_info));
2362 memset(fp, 0, sizeof(*fp));
2364 /* Restore the NAPI object as it has been already initialized */
2365 fp->napi = orig_napi;
2366 fp->tpa_info = orig_tpa_info;
2370 fp->max_cos = bp->max_cos;
2372 /* Special queues support only one CoS */
2375 /* Init txdata pointers */
2377 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
2379 for_each_cos_in_tx_queue(fp, cos)
2380 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2381 BNX2X_NUM_ETH_QUEUES(bp) + index];
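/* For example (hypothetical configuration): with 8 ETH queues and 3 CoS
 * values, fastpath index 2 / CoS 1 uses bnx2x_txq[1 * 8 + 2], i.e. the
 * flat txdata array is grouped first by CoS and then by queue index.
 */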
2383 /* set the tpa flag for each queue. The tpa flag determines the queue
2384 * minimal size so it must be set prior to queue memory allocation
2386 fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
2387 (bp->flags & GRO_ENABLE_FLAG &&
2388 bnx2x_mtu_allows_gro(bp->dev->mtu)));
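/* In other words, TPA stays enabled when LRO is requested, or when GRO is
 * requested and the current MTU still allows aggregation; otherwise the
 * queue is sized and initialized without TPA.
 */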
2389 if (bp->flags & TPA_ENABLE_FLAG)
2390 fp->mode = TPA_MODE_LRO;
2391 else if (bp->flags & GRO_ENABLE_FLAG)
2392 fp->mode = TPA_MODE_GRO;
2394 /* We don't want TPA on an FCoE L2 ring */
2396 fp->disable_tpa = 1;
2399 int bnx2x_load_cnic(struct bnx2x *bp)
2401 int i, rc, port = BP_PORT(bp);
2403 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2405 mutex_init(&bp->cnic_mutex);
2408 rc = bnx2x_alloc_mem_cnic(bp);
2410 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2411 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2415 rc = bnx2x_alloc_fp_mem_cnic(bp);
2417 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2418 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2421 /* Update the number of queues with the cnic queues */
2422 rc = bnx2x_set_real_num_queues(bp, 1);
2424 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2425 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2428 /* Add all CNIC NAPI objects */
2429 bnx2x_add_all_napi_cnic(bp);
2430 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2431 bnx2x_napi_enable_cnic(bp);
2433 rc = bnx2x_init_hw_func_cnic(bp);
2435 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2437 bnx2x_nic_init_cnic(bp);
2440 /* Enable Timer scan */
2441 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2443 /* setup cnic queues */
2444 for_each_cnic_queue(bp, i) {
2445 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2447 BNX2X_ERR("Queue setup failed\n");
2448 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2453 /* Initialize Rx filter. */
2454 bnx2x_set_rx_mode_inner(bp);
2456 /* re-read iscsi info */
2457 bnx2x_get_iscsi_info(bp);
2458 bnx2x_setup_cnic_irq_info(bp);
2459 bnx2x_setup_cnic_info(bp);
2460 bp->cnic_loaded = true;
2461 if (bp->state == BNX2X_STATE_OPEN)
2462 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2464 DP(NETIF_MSG_IFUP, "CNIC-related load completed successfully\n");
2468 #ifndef BNX2X_STOP_ON_ERROR
2470 /* Disable Timer scan */
2471 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2474 bnx2x_napi_disable_cnic(bp);
2475 /* Update the number of queues without the cnic queues */
2476 rc = bnx2x_set_real_num_queues(bp, 0);
2478 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2480 BNX2X_ERR("CNIC-related load failed\n");
2481 bnx2x_free_fp_mem_cnic(bp);
2482 bnx2x_free_mem_cnic(bp);
2484 #endif /* ! BNX2X_STOP_ON_ERROR */
2487 /* must be called with rtnl_lock */
2488 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2490 int port = BP_PORT(bp);
2491 int i, rc = 0, load_code = 0;
2493 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2495 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2497 #ifdef BNX2X_STOP_ON_ERROR
2498 if (unlikely(bp->panic)) {
2499 BNX2X_ERR("Can't load NIC when there is panic\n");
2504 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2506 /* zero the structure w/o any lock, before SP handler is initialized */
2507 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2508 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2509 &bp->last_reported_link.link_report_flags);
2512 /* must be called before memory allocation and HW init */
2513 bnx2x_ilt_set_info(bp);
2516 * Zero fastpath structures while preserving invariants that are allocated
2517 * only once: the napi struct, fp index, max_cos and the bp pointer.
2518 * Also set fp->disable_tpa and txdata_ptr.
2520 DP(NETIF_MSG_IFUP, "num queues: %d\n", bp->num_queues);
2521 for_each_queue(bp, i)
2523 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2524 bp->num_cnic_queues) *
2525 sizeof(struct bnx2x_fp_txdata));
2527 bp->fcoe_init = false;
2529 /* Set the receive queues buffer size */
2530 bnx2x_set_rx_buf_size(bp);
2533 rc = bnx2x_alloc_mem(bp);
2535 BNX2X_ERR("Unable to allocate bp memory\n");
2540 /* Allocated memory for FW statistics */
2541 if (bnx2x_alloc_fw_stats_mem(bp))
2542 LOAD_ERROR_EXIT(bp, load_error0);
2544 /* needs to be done after alloc mem, since it self-adjusts to the amount
2545 * of memory available for RSS queues
2547 rc = bnx2x_alloc_fp_mem(bp);
2549 BNX2X_ERR("Unable to allocate memory for fps\n");
2550 LOAD_ERROR_EXIT(bp, load_error0);
2553 /* request pf to initialize status blocks */
2555 rc = bnx2x_vfpf_init(bp);
2557 LOAD_ERROR_EXIT(bp, load_error0);
2560 /* Since bnx2x_alloc_mem() may update bp->num_queues,
2561 * bnx2x_set_real_num_queues() should always come after it.
2562 * At this stage cnic queues are not counted.
2564 rc = bnx2x_set_real_num_queues(bp, 0);
2566 BNX2X_ERR("Unable to set real_num_queues\n");
2567 LOAD_ERROR_EXIT(bp, load_error0);
2570 /* configure multi cos mappings in kernel.
2571 * this configuration may be overridden by a multi class queue
2572 * discipline or by a dcbx negotiation result.
2574 bnx2x_setup_tc(bp->dev, bp->max_cos);
2576 /* Add all NAPI objects */
2577 bnx2x_add_all_napi(bp);
2578 DP(NETIF_MSG_IFUP, "napi added\n");
2579 bnx2x_napi_enable(bp);
2582 /* set pf load just before approaching the MCP */
2583 bnx2x_set_pf_load(bp);
2585 /* if mcp exists send load request and analyze response */
2586 if (!BP_NOMCP(bp)) {
2587 /* attempt to load pf */
2588 rc = bnx2x_nic_load_request(bp, &load_code);
2590 LOAD_ERROR_EXIT(bp, load_error1);
2592 /* what did mcp say? */
2593 rc = bnx2x_nic_load_analyze_req(bp, load_code);
2595 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2596 LOAD_ERROR_EXIT(bp, load_error2);
2599 load_code = bnx2x_nic_load_no_mcp(bp, port);
2602 /* mark pmf if applicable */
2603 bnx2x_nic_load_pmf(bp, load_code);
2605 /* Init Function state controlling object */
2606 bnx2x__init_func_obj(bp);
2609 rc = bnx2x_init_hw(bp, load_code);
2611 BNX2X_ERR("HW init failed, aborting\n");
2612 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2613 LOAD_ERROR_EXIT(bp, load_error2);
2617 bnx2x_pre_irq_nic_init(bp);
2619 /* Connect to IRQs */
2620 rc = bnx2x_setup_irqs(bp);
2622 BNX2X_ERR("setup irqs failed\n");
2624 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2625 LOAD_ERROR_EXIT(bp, load_error2);
2628 /* Init per-function objects */
2630 /* Setup NIC internals and enable interrupts */
2631 bnx2x_post_irq_nic_init(bp, load_code);
2633 bnx2x_init_bp_objs(bp);
2634 bnx2x_iov_nic_init(bp);
2636 /* Set AFEX default VLAN tag to an invalid value */
2637 bp->afex_def_vlan_tag = -1;
2638 bnx2x_nic_load_afex_dcc(bp, load_code);
2639 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2640 rc = bnx2x_func_start(bp);
2642 BNX2X_ERR("Function start failed!\n");
2643 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2645 LOAD_ERROR_EXIT(bp, load_error3);
2648 /* Send LOAD_DONE command to MCP */
2649 if (!BP_NOMCP(bp)) {
2650 load_code = bnx2x_fw_command(bp,
2651 DRV_MSG_CODE_LOAD_DONE, 0);
2653 BNX2X_ERR("MCP response failure, aborting\n");
2655 LOAD_ERROR_EXIT(bp, load_error3);
2659 /* initialize FW coalescing state machines in RAM */
2660 bnx2x_update_coalesce(bp);
2663 /* setup the leading queue */
2664 rc = bnx2x_setup_leading(bp);
2666 BNX2X_ERR("Setup leading failed!\n");
2667 LOAD_ERROR_EXIT(bp, load_error3);
2670 /* set up the rest of the queues */
2671 for_each_nondefault_eth_queue(bp, i) {
2673 rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
2675 rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
2677 BNX2X_ERR("Queue %d setup failed\n", i);
2678 LOAD_ERROR_EXIT(bp, load_error3);
2683 rc = bnx2x_init_rss(bp);
2685 BNX2X_ERR("PF RSS init failed\n");
2686 LOAD_ERROR_EXIT(bp, load_error3);
2689 /* Now that clients are configured we are ready to work */
2690 bp->state = BNX2X_STATE_OPEN;
2692 /* Configure a ucast MAC */
2694 rc = bnx2x_set_eth_mac(bp, true);
2696 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2699 BNX2X_ERR("Setting Ethernet MAC failed\n");
2700 LOAD_ERROR_EXIT(bp, load_error3);
2703 if (IS_PF(bp) && bp->pending_max) {
2704 bnx2x_update_max_mf_config(bp, bp->pending_max);
2705 bp->pending_max = 0;
2709 rc = bnx2x_initial_phy_init(bp, load_mode);
2711 LOAD_ERROR_EXIT(bp, load_error3);
2713 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
2715 /* Start fast path */
2717 /* Initialize Rx filter. */
2718 bnx2x_set_rx_mode_inner(bp);
2721 switch (load_mode) {
2723 /* Tx queue should be only re-enabled */
2724 netif_tx_wake_all_queues(bp->dev);
2728 netif_tx_start_all_queues(bp->dev);
2729 smp_mb__after_clear_bit();
2733 case LOAD_LOOPBACK_EXT:
2734 bp->state = BNX2X_STATE_DIAG;
2742 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
2744 bnx2x__link_status_update(bp);
2746 /* start the timer */
2747 mod_timer(&bp->timer, jiffies + bp->current_interval);
2749 if (CNIC_ENABLED(bp))
2750 bnx2x_load_cnic(bp);
2752 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2753 /* mark driver is loaded in shmem2 */
2755 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2756 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2757 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2758 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2761 /* Wait for all pending SP commands to complete */
2762 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2763 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2764 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
2768 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2769 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2770 bnx2x_dcbx_init(bp, false);
2772 DP(NETIF_MSG_IFUP, "NIC load completed successfully\n");
2776 #ifndef BNX2X_STOP_ON_ERROR
2779 bnx2x_int_disable_sync(bp, 1);
2781 /* Clean queueable objects */
2782 bnx2x_squeeze_objects(bp);
2785 /* Free SKBs, SGEs, TPA pool and driver internals */
2786 bnx2x_free_skbs(bp);
2787 for_each_rx_queue(bp, i)
2788 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2793 if (IS_PF(bp) && !BP_NOMCP(bp)) {
2794 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2795 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2800 bnx2x_napi_disable(bp);
2801 bnx2x_del_all_napi(bp);
2803 /* clear pf_load status, as it was already set */
2805 bnx2x_clear_pf_load(bp);
2807 bnx2x_free_fp_mem(bp);
2808 bnx2x_free_fw_stats_mem(bp);
2812 #endif /* ! BNX2X_STOP_ON_ERROR */
2815 int bnx2x_drain_tx_queues(struct bnx2x *bp)
2819 /* Wait until tx fastpath tasks complete */
2820 for_each_tx_queue(bp, i) {
2821 struct bnx2x_fastpath *fp = &bp->fp[i];
2823 for_each_cos_in_tx_queue(fp, cos)
2824 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2831 /* must be called with rtnl_lock */
2832 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2835 bool global = false;
2837 DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2839 /* mark driver is unloaded in shmem2 */
2840 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2842 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2843 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2844 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2847 if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
2848 (bp->state == BNX2X_STATE_CLOSED ||
2849 bp->state == BNX2X_STATE_ERROR)) {
2850 /* We can get here if the driver has been unloaded
2851 * during parity error recovery and is either waiting for a
2852 * leader to complete or for other functions to unload and
2853 * then ifdown has been issued. In this case we want to
2854 * unload and let other functions complete a recovery
2857 bp->recovery_state = BNX2X_RECOVERY_DONE;
2859 bnx2x_release_leader_lock(bp);
2862 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2863 BNX2X_ERR("Can't unload in closed or error state\n");
2867 /* Nothing to do during unload if previous bnx2x_nic_load()
2868 * has not completed successfully - all resources are released.
2870 * We can get here only after an unsuccessful ndo_* callback, during which
2871 * dev->IFF_UP flag is still on.
2873 if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
2876 /* It's important to set the bp->state to the value different from
2877 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2878 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2880 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2883 /* indicate to VFs that the PF is going down */
2884 bnx2x_iov_channel_down(bp);
2886 if (CNIC_LOADED(bp))
2887 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2890 bnx2x_tx_disable(bp);
2891 netdev_reset_tc(bp->dev);
2893 bp->rx_mode = BNX2X_RX_MODE_NONE;
2895 del_timer_sync(&bp->timer);
2898 /* Set ALWAYS_ALIVE bit in shmem */
2899 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2900 bnx2x_drv_pulse(bp);
2901 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2902 bnx2x_save_statistics(bp);
2905 /* wait till consumers catch up with producers in all queues */
2906 bnx2x_drain_tx_queues(bp);
2908 /* if VF indicate to PF this function is going down (PF will delete sp
2909 * elements and clear initializations
2912 bnx2x_vfpf_close_vf(bp);
2913 else if (unload_mode != UNLOAD_RECOVERY)
2914 /* if this is a normal/close unload need to clean up chip*/
2915 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
2917 /* Send the UNLOAD_REQUEST to the MCP */
2918 bnx2x_send_unload_req(bp, unload_mode);
2920 /* Prevent transactions to host from the functions on the
2921 * engine that doesn't reset global blocks in case of global
2922 * attention once global blocks are reset and gates are opened
2923 * (the engine whose leader will perform the recovery last).
2926 if (!CHIP_IS_E1x(bp))
2927 bnx2x_pf_disable(bp);
2929 /* Disable HW interrupts, NAPI */
2930 bnx2x_netif_stop(bp, 1);
2931 /* Delete all NAPI objects */
2932 bnx2x_del_all_napi(bp);
2933 if (CNIC_LOADED(bp))
2934 bnx2x_del_all_napi_cnic(bp);
2938 /* Report UNLOAD_DONE to MCP */
2939 bnx2x_send_unload_done(bp, false);
2943 * At this stage no more interrupts will arrive so we may safely clean
2944 * the queueable objects here in case they failed to get cleaned so far.
2947 bnx2x_squeeze_objects(bp);
2949 /* There should be no more pending SP commands at this stage */
2954 /* Free SKBs, SGEs, TPA pool and driver internals */
2955 bnx2x_free_skbs(bp);
2956 if (CNIC_LOADED(bp))
2957 bnx2x_free_skbs_cnic(bp);
2958 for_each_rx_queue(bp, i)
2959 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2961 bnx2x_free_fp_mem(bp);
2962 if (CNIC_LOADED(bp))
2963 bnx2x_free_fp_mem_cnic(bp);
2966 if (CNIC_LOADED(bp))
2967 bnx2x_free_mem_cnic(bp);
2971 bp->state = BNX2X_STATE_CLOSED;
2972 bp->cnic_loaded = false;
2974 /* Check if there are pending parity attentions. If there are - set
2975 * RECOVERY_IN_PROGRESS.
2977 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
2978 bnx2x_set_reset_in_progress(bp);
2980 /* Set RESET_IS_GLOBAL if needed */
2982 bnx2x_set_reset_global(bp);
2985 /* The last driver must disable a "close the gate" if there is no
2986 * parity attention or "process kill" pending.
2989 !bnx2x_clear_pf_load(bp) &&
2990 bnx2x_reset_is_done(bp, BP_PATH(bp)))
2991 bnx2x_disable_close_the_gate(bp);
2993 DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
2998 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
3002 /* If there is no power capability, silently succeed */
3004 BNX2X_DEV_INFO("No power capability. Breaking.\n");
3008 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3012 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3013 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3014 PCI_PM_CTRL_PME_STATUS));
3016 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3017 /* delay required during transition out of D3hot */
3022 /* If there are other clients above, don't
3023 * shut down the power */
3024 if (atomic_read(&bp->pdev->enable_cnt) != 1)
3026 /* Don't shut down the power for emulation and FPGA */
3027 if (CHIP_REV_IS_SLOW(bp))
3030 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3034 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3036 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3039 /* No more memory access after this point until
3040 * device is brought back to D0.
3045 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
3052 * net_device service functions
3054 int bnx2x_poll(struct napi_struct *napi, int budget)
3058 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3060 struct bnx2x *bp = fp->bp;
3063 #ifdef BNX2X_STOP_ON_ERROR
3064 if (unlikely(bp->panic)) {
3065 napi_complete(napi);
3069 if (!bnx2x_fp_lock_napi(fp))
3072 for_each_cos_in_tx_queue(fp, cos)
3073 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3074 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
3076 if (bnx2x_has_rx_work(fp)) {
3077 work_done += bnx2x_rx_int(fp, budget - work_done);
3079 /* must not complete if we consumed full budget */
3080 if (work_done >= budget) {
3081 bnx2x_fp_unlock_napi(fp);
3086 /* Fall out from the NAPI loop if needed */
3087 if (!bnx2x_fp_unlock_napi(fp) &&
3088 !(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3090 /* No need to update SB for FCoE L2 ring as long as
3091 * it's connected to the default SB and the SB
3092 * has been updated when NAPI was scheduled.
3094 if (IS_FCOE_FP(fp)) {
3095 napi_complete(napi);
3098 bnx2x_update_fpsb_idx(fp);
3099 /* bnx2x_has_rx_work() reads the status block,
3100 * thus we need to ensure that status block indices
3101 * have been actually read (bnx2x_update_fpsb_idx)
3102 * prior to this check (bnx2x_has_rx_work) so that
3103 * we won't write the "newer" value of the status block
3104 * to IGU (if there was a DMA right after
3105 * bnx2x_has_rx_work and if there is no rmb, the memory
3106 * reading (bnx2x_update_fpsb_idx) may be postponed
3107 * to right before bnx2x_ack_sb). In this case there
3108 * will never be another interrupt until there is
3109 * another update of the status block, while there
3110 * is still unhandled work.
3114 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3115 napi_complete(napi);
3116 /* Re-enable interrupts */
3117 DP(NETIF_MSG_RX_STATUS,
3118 "Update index to %d\n", fp->fp_hc_idx);
3119 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3120 le16_to_cpu(fp->fp_hc_idx),
3130 #ifdef CONFIG_NET_RX_BUSY_POLL
3131 /* must be called with local_bh_disable()d */
3132 int bnx2x_low_latency_recv(struct napi_struct *napi)
3134 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3136 struct bnx2x *bp = fp->bp;
3139 if ((bp->state == BNX2X_STATE_CLOSED) ||
3140 (bp->state == BNX2X_STATE_ERROR) ||
3141 (bp->flags & (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG)))
3142 return LL_FLUSH_FAILED;
3144 if (!bnx2x_fp_lock_poll(fp))
3145 return LL_FLUSH_BUSY;
3147 if (bnx2x_has_rx_work(fp))
3148 found = bnx2x_rx_int(fp, 4);
3150 bnx2x_fp_unlock_poll(fp);
3156 /* we split the first BD into headers and data BDs
3157 * to ease the pain of our fellow microcode engineers;
3158 * we use one mapping for both BDs.
3160 static u16 bnx2x_tx_split(struct bnx2x *bp,
3161 struct bnx2x_fp_txdata *txdata,
3162 struct sw_tx_bd *tx_buf,
3163 struct eth_tx_start_bd **tx_bd, u16 hlen,
3166 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3167 struct eth_tx_bd *d_tx_bd;
3169 int old_len = le16_to_cpu(h_tx_bd->nbytes);
3171 /* first fix first BD */
3172 h_tx_bd->nbytes = cpu_to_le16(hlen);
3174 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3175 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
3177 /* now get a new data BD
3178 * (after the pbd) and fill it */
3179 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3180 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3182 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3183 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3185 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3186 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3187 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3189 /* this marks the BD as one that has no individual mapping */
3190 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3192 DP(NETIF_MSG_TX_QUEUED,
3193 "TSO split data size is %d (%x:%x)\n",
3194 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3197 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3202 #define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3203 #define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
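/* A sketch of the intent of bnx2x_csum_fix(): the checksum supplied by the
 * stack may have been computed starting 'fix' bytes away from the transport
 * header, so the checksum of that difference is subtracted (fix > 0) or
 * added (fix < 0) before folding, and the result is byte-swapped into the
 * format the FW expects (see the "HW bug" fixup in bnx2x_set_pbd_csum()).
 */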
3204 static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
3206 __sum16 tsum = (__force __sum16) csum;
3209 tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3210 csum_partial(t_header - fix, fix, 0)));
3213 tsum = ~csum_fold(csum_add((__force __wsum) csum,
3214 csum_partial(t_header, -fix, 0)));
3216 return bswab16(tsum);
3219 static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
3225 if (skb->ip_summed != CHECKSUM_PARTIAL)
3228 protocol = vlan_get_protocol(skb);
3229 if (protocol == htons(ETH_P_IPV6)) {
3231 prot = ipv6_hdr(skb)->nexthdr;
3234 prot = ip_hdr(skb)->protocol;
3237 if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3238 if (inner_ip_hdr(skb)->version == 6) {
3239 rc |= XMIT_CSUM_ENC_V6;
3240 if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3241 rc |= XMIT_CSUM_TCP;
3243 rc |= XMIT_CSUM_ENC_V4;
3244 if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
3245 rc |= XMIT_CSUM_TCP;
3248 if (prot == IPPROTO_TCP)
3249 rc |= XMIT_CSUM_TCP;
3251 if (skb_is_gso_v6(skb)) {
3252 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
3253 if (rc & XMIT_CSUM_ENC)
3254 rc |= XMIT_GSO_ENC_V6;
3255 } else if (skb_is_gso(skb)) {
3256 rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
3257 if (rc & XMIT_CSUM_ENC)
3258 rc |= XMIT_GSO_ENC_V4;
3264 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3265 /* check if packet requires linearization (packet is too fragmented);
3266 no need to check fragmentation if page size > 8K (there will be no
3267 violation of FW restrictions) */
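/* Rough idea of the check below: the FW may fetch only a limited number of
 * BDs per packet, so for an LSO frame every window of (MAX_FETCH_BD - 3)
 * consecutive data BDs must cover at least one MSS worth of payload; the
 * loop slides that window across the frags and requests linearization when
 * any window falls short.
 */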
3268 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3273 int first_bd_sz = 0;
3275 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3276 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
3278 if (xmit_type & XMIT_GSO) {
3279 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3280 /* Check if LSO packet needs to be copied:
3281 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
3282 int wnd_size = MAX_FETCH_BD - 3;
3283 /* Number of windows to check */
3284 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3289 /* Headers length */
3290 hlen = (int)(skb_transport_header(skb) - skb->data) +
3293 /* Amount of data (w/o headers) on linear part of SKB*/
3294 first_bd_sz = skb_headlen(skb) - hlen;
3296 wnd_sum = first_bd_sz;
3298 /* Calculate the first sum - it's special */
3299 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3301 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
3303 /* If there was data in the linear part of the skb - check it */
3304 if (first_bd_sz > 0) {
3305 if (unlikely(wnd_sum < lso_mss)) {
3310 wnd_sum -= first_bd_sz;
3313 /* Others are easier: run through the frag list and
3314 check all windows */
3315 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3317 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
3319 if (unlikely(wnd_sum < lso_mss)) {
3324 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
3327 /* in the non-LSO case a too fragmented packet should always be linearized */
3334 if (unlikely(to_copy))
3335 DP(NETIF_MSG_TX_QUEUED,
3336 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
3337 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3338 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3344 static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
3347 struct ipv6hdr *ipv6;
3349 *parsing_data |= (skb_shinfo(skb)->gso_size <<
3350 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
3351 ETH_TX_PARSE_BD_E2_LSO_MSS;
3353 if (xmit_type & XMIT_GSO_ENC_V6)
3354 ipv6 = inner_ipv6_hdr(skb);
3355 else if (xmit_type & XMIT_GSO_V6)
3356 ipv6 = ipv6_hdr(skb);
3360 if (ipv6 && ipv6->nexthdr == NEXTHDR_IPV6)
3361 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
3365 * bnx2x_set_pbd_gso - update PBD in GSO case.
3369 * @xmit_type: xmit flags
3371 static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3372 struct eth_tx_parse_bd_e1x *pbd,
3373 struct eth_tx_start_bd *tx_start_bd,
3376 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
3377 pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
3378 pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
3380 if (xmit_type & XMIT_GSO_V4) {
3381 pbd->ip_id = bswab16(ip_hdr(skb)->id);
3382 pbd->tcp_pseudo_csum =
3383 bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3385 0, IPPROTO_TCP, 0));
3387 /* GSO on 57710/57711 needs FW to calculate IP checksum */
3388 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
3390 pbd->tcp_pseudo_csum =
3391 bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3392 &ipv6_hdr(skb)->daddr,
3393 0, IPPROTO_TCP, 0));
3397 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
3401 * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3403 * @bp: driver handle
3405 * @parsing_data: data to be updated
3406 * @xmit_type: xmit flags
3408 * 57712/578xx related, when skb has encapsulation
3410 static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3411 u32 *parsing_data, u32 xmit_type)
3414 ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3415 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3416 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3418 if (xmit_type & XMIT_CSUM_TCP) {
3419 *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3420 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3421 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3423 return skb_inner_transport_header(skb) +
3424 inner_tcp_hdrlen(skb) - skb->data;
3427 /* We support checksum offload for TCP and UDP only.
3428 * No need to pass the UDP header length - it's a constant.
3430 return skb_inner_transport_header(skb) +
3431 sizeof(struct udphdr) - skb->data;
3435 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
3437 * @bp: driver handle
3439 * @parsing_data: data to be updated
3440 * @xmit_type: xmit flags
3442 * 57712/578xx related
3444 static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3445 u32 *parsing_data, u32 xmit_type)
3448 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
3449 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3450 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3452 if (xmit_type & XMIT_CSUM_TCP) {
3453 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3454 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3455 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3457 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
3459 /* We support checksum offload for TCP and UDP only.
3460 * No need to pass the UDP header length - it's a constant.
3462 return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
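/* The value returned above is the header length in bytes from the start of
 * the frame to the end of the L4 header; for example, a plain untagged
 * IPv4/TCP frame with no options gives 14 + 20 + 20 = 54, which the caller
 * later uses as 'hlen' when deciding whether to split the first BD for TSO.
 */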
3465 /* set FW indication according to inner or outer protocols if tunneled */
3466 static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3467 struct eth_tx_start_bd *tx_start_bd,
3470 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3472 if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
3473 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
3475 if (!(xmit_type & XMIT_CSUM_TCP))
3476 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
3480 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
3482 * @bp: driver handle
3484 * @pbd: parse BD to be updated
3485 * @xmit_type: xmit flags
3487 static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3488 struct eth_tx_parse_bd_e1x *pbd,
3491 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
3493 /* for now NS flag is not used in Linux */
3496 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3497 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
3499 pbd->ip_hlen_w = (skb_transport_header(skb) -
3500 skb_network_header(skb)) >> 1;
3502 hlen += pbd->ip_hlen_w;
3504 /* We support checksum offload for TCP and UDP only */
3505 if (xmit_type & XMIT_CSUM_TCP)
3506 hlen += tcp_hdrlen(skb) / 2;
3508 hlen += sizeof(struct udphdr) / 2;
3510 pbd->total_hlen_w = cpu_to_le16(hlen);
3513 if (xmit_type & XMIT_CSUM_TCP) {
3514 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
3517 s8 fix = SKB_CS_OFF(skb); /* signed! */
3519 DP(NETIF_MSG_TX_QUEUED,
3520 "hlen %d fix %d csum before fix %x\n",
3521 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3523 /* HW bug: fixup the CSUM */
3524 pbd->tcp_pseudo_csum =
3525 bnx2x_csum_fix(skb_transport_header(skb),
3528 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3529 pbd->tcp_pseudo_csum);
3535 static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3536 struct eth_tx_parse_bd_e2 *pbd_e2,
3537 struct eth_tx_parse_2nd_bd *pbd2,
3542 u8 outerip_off, outerip_len = 0;
3544 /* from outer IP to transport */
3545 hlen_w = (skb_inner_transport_header(skb) -
3546 skb_network_header(skb)) >> 1;
3549 hlen_w += inner_tcp_hdrlen(skb) >> 1;
3551 pbd2->fw_ip_hdr_to_payload_w = hlen_w;
3553 /* outer IP header info */
3554 if (xmit_type & XMIT_CSUM_V4) {
3555 struct iphdr *iph = ip_hdr(skb);
3556 u32 csum = (__force u32)(~iph->check) -
3557 (__force u32)iph->tot_len -
3558 (__force u32)iph->frag_off;
3560 pbd2->fw_ip_csum_wo_len_flags_frag =
3561 bswab16(csum_fold((__force __wsum)csum));
3563 pbd2->fw_ip_hdr_to_payload_w =
3564 hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
3567 pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3569 pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3571 if (xmit_type & XMIT_GSO_V4) {
3572 pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
3574 pbd_e2->data.tunnel_data.pseudo_csum =
3575 bswab16(~csum_tcpudp_magic(
3576 inner_ip_hdr(skb)->saddr,
3577 inner_ip_hdr(skb)->daddr,
3578 0, IPPROTO_TCP, 0));
3580 outerip_len = ip_hdr(skb)->ihl << 1;
3582 pbd_e2->data.tunnel_data.pseudo_csum =
3583 bswab16(~csum_ipv6_magic(
3584 &inner_ipv6_hdr(skb)->saddr,
3585 &inner_ipv6_hdr(skb)->daddr,
3586 0, IPPROTO_TCP, 0));
3589 outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3593 (!!(xmit_type & XMIT_CSUM_V6) <<
3594 ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT) |
3596 ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3597 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3598 ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
3600 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3601 SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3602 pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3606 /* called with netif_tx_lock
3607 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3608 * netif_wake_queue()
3610 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3612 struct bnx2x *bp = netdev_priv(dev);
3614 struct netdev_queue *txq;
3615 struct bnx2x_fp_txdata *txdata;
3616 struct sw_tx_bd *tx_buf;
3617 struct eth_tx_start_bd *tx_start_bd, *first_bd;
3618 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
3619 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
3620 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
3621 struct eth_tx_parse_2nd_bd *pbd2 = NULL;
3622 u32 pbd_e2_parsing_data = 0;
3623 u16 pkt_prod, bd_prod;
3626 u32 xmit_type = bnx2x_xmit_type(bp, skb);
3629 __le16 pkt_size = 0;
3631 u8 mac_type = UNICAST_ADDRESS;
3633 #ifdef BNX2X_STOP_ON_ERROR
3634 if (unlikely(bp->panic))
3635 return NETDEV_TX_BUSY;
3638 txq_index = skb_get_queue_mapping(skb);
3639 txq = netdev_get_tx_queue(dev, txq_index);
3641 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
3643 txdata = &bp->bnx2x_txq[txq_index];
3645 /* enable this debug print to view the transmission queue being used
3646 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
3647 txq_index, fp_index, txdata_index); */
3649 /* enable this debug print to view the transmission details
3650 DP(NETIF_MSG_TX_QUEUED,
3651 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
3652 txdata->cid, fp_index, txdata_index, txdata, fp); */
3654 if (unlikely(bnx2x_tx_avail(bp, txdata) <
3655 skb_shinfo(skb)->nr_frags +
3657 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
3658 /* Handle special storage cases separately */
3659 if (txdata->tx_ring_size == 0) {
3660 struct bnx2x_eth_q_stats *q_stats =
3661 bnx2x_fp_qstats(bp, txdata->parent_fp);
3662 q_stats->driver_filtered_tx_pkt++;
3664 return NETDEV_TX_OK;
3666 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3667 netif_tx_stop_queue(txq);
3668 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
3670 return NETDEV_TX_BUSY;
3673 DP(NETIF_MSG_TX_QUEUED,
3674 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
3675 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
3676 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3679 eth = (struct ethhdr *)skb->data;
3681 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
3682 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3683 if (is_broadcast_ether_addr(eth->h_dest))
3684 mac_type = BROADCAST_ADDRESS;
3686 mac_type = MULTICAST_ADDRESS;
3689 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3690 /* First, check if we need to linearize the skb (due to FW
3691 restrictions). No need to check fragmentation if page size > 8K
3692 (there will be no violation to FW restrictions) */
3693 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3694 /* Statistics of linearization */
3696 if (skb_linearize(skb) != 0) {
3697 DP(NETIF_MSG_TX_QUEUED,
3698 "SKB linearization failed - silently dropping this SKB\n");
3699 dev_kfree_skb_any(skb);
3700 return NETDEV_TX_OK;
3704 /* Map skb linear data for DMA */
3705 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3706 skb_headlen(skb), DMA_TO_DEVICE);
3707 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3708 DP(NETIF_MSG_TX_QUEUED,
3709 "SKB mapping failed - silently dropping this SKB\n");
3710 dev_kfree_skb_any(skb);
3711 return NETDEV_TX_OK;
3714 Please read carefully. First we use one BD which we mark as start,
3715 then we have a parsing info BD (used for TSO or xsum),
3716 and only then we have the rest of the TSO BDs.
3717 (don't forget to mark the last one as last,
3718 and to unmap only AFTER you write to the BD ...)
3719 And above all, all pbd sizes are in words - NOT DWORDS!
3722 /* get current pkt produced now - advance it just before sending packet
3723 * since mapping of pages may fail and cause packet to be dropped
3725 pkt_prod = txdata->tx_pkt_prod;
3726 bd_prod = TX_BD(txdata->tx_bd_prod);
3728 /* get a tx_buf and first BD
3729 * tx_start_bd may be changed during SPLIT,
3730 * but first_bd will always stay first
3732 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3733 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
3734 first_bd = tx_start_bd;
3736 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
3738 /* header nbd: indirectly zero other flags! */
3739 tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
3741 /* remember the first BD of the packet */
3742 tx_buf->first_bd = txdata->tx_bd_prod;
3746 DP(NETIF_MSG_TX_QUEUED,
3747 "sending pkt %u @%p next_idx %u bd %u @%p\n",
3748 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
3750 if (vlan_tx_tag_present(skb)) {
3751 tx_start_bd->vlan_or_ethertype =
3752 cpu_to_le16(vlan_tx_tag_get(skb));
3753 tx_start_bd->bd_flags.as_bitfield |=
3754 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3756 /* when transmitting in a vf, start bd must hold the ethertype
3757 * for fw to enforce it
3760 tx_start_bd->vlan_or_ethertype =
3761 cpu_to_le16(ntohs(eth->h_proto));
3763 /* used by FW for packet accounting */
3764 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3767 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
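/* Rough BD chain layout built below for one packet: a start BD, then one
 * parsing BD (e1x or e2, plus a 2nd parsing BD for tunnelled frames), then
 * data BDs for the frags; first_bd->nbd is updated to the real count once
 * all pages have been mapped.
 */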
3769 /* turn on parsing and get a BD */
3770 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3772 if (xmit_type & XMIT_CSUM)
3773 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
3775 if (!CHIP_IS_E1x(bp)) {
3776 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
3777 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3779 if (xmit_type & XMIT_CSUM_ENC) {
3780 u16 global_data = 0;
3782 /* Set PBD in enc checksum offload case */
3783 hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3784 &pbd_e2_parsing_data,
3787 /* turn on 2nd parsing and get a BD */
3788 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3790 pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3792 memset(pbd2, 0, sizeof(*pbd2));
3794 pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3795 (skb_inner_network_header(skb) -
3798 if (xmit_type & XMIT_GSO_ENC)
3799 bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3803 pbd2->global_data = cpu_to_le16(global_data);
3805 /* add additional parse BD indication to start BD */
3806 SET_FLAG(tx_start_bd->general_data,
3807 ETH_TX_START_BD_PARSE_NBDS, 1);
3808 /* set encapsulation flag in start BD */
3809 SET_FLAG(tx_start_bd->general_data,
3810 ETH_TX_START_BD_TUNNEL_EXIST, 1);
3812 } else if (xmit_type & XMIT_CSUM) {
3813 /* Set PBD in checksum offload case w/o encapsulation */
3814 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3815 &pbd_e2_parsing_data,
3819 /* Add the MACs to the parsing BD if this is a VF */
3821 /* override GRE parameters in BD */
3822 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3823 &pbd_e2->data.mac_addr.src_mid,
3824 &pbd_e2->data.mac_addr.src_lo,
3827 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3828 &pbd_e2->data.mac_addr.dst_mid,
3829 &pbd_e2->data.mac_addr.dst_lo,
3833 SET_FLAG(pbd_e2_parsing_data,
3834 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
3836 u16 global_data = 0;
3837 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
3838 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3839 /* Set PBD in checksum offload case */
3840 if (xmit_type & XMIT_CSUM)
3841 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
3843 SET_FLAG(global_data,
3844 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
3845 pbd_e1x->global_data |= cpu_to_le16(global_data);
3848 /* Setup the data pointer of the first BD of the packet */
3849 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3850 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3851 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3852 pkt_size = tx_start_bd->nbytes;
3854 DP(NETIF_MSG_TX_QUEUED,
3855 "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
3856 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
3857 le16_to_cpu(tx_start_bd->nbytes),
3858 tx_start_bd->bd_flags.as_bitfield,
3859 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
3861 if (xmit_type & XMIT_GSO) {
3863 DP(NETIF_MSG_TX_QUEUED,
3864 "TSO packet len %d hlen %d total len %d tso size %d\n",
3865 skb->len, hlen, skb_headlen(skb),
3866 skb_shinfo(skb)->gso_size);
3868 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3870 if (unlikely(skb_headlen(skb) > hlen)) {
3872 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3876 if (!CHIP_IS_E1x(bp))
3877 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3880 bnx2x_set_pbd_gso(skb, pbd_e1x, first_bd, xmit_type);
3883 /* Set the PBD's parsing_data field if not zero
3884 * (for the chips newer than 57711).
3886 if (pbd_e2_parsing_data)
3887 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3889 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3891 /* Handle fragmented skb */
3892 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3893 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3895 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3896 skb_frag_size(frag), DMA_TO_DEVICE);
3897 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3898 unsigned int pkts_compl = 0, bytes_compl = 0;
3900 DP(NETIF_MSG_TX_QUEUED,
3901 "Unable to map page - dropping packet...\n");
3903 /* we need to unmap all buffers already mapped
3905 * first_bd->nbd needs to be properly updated
3906 * before the call to bnx2x_free_tx_pkt
3908 first_bd->nbd = cpu_to_le16(nbd);
3909 bnx2x_free_tx_pkt(bp, txdata,
3910 TX_BD(txdata->tx_pkt_prod),
3911 &pkts_compl, &bytes_compl);
3912 return NETDEV_TX_OK;
3915 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3916 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3917 if (total_pkt_bd == NULL)
3918 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3920 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3921 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3922 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
3923 le16_add_cpu(&pkt_size, skb_frag_size(frag));
3926 DP(NETIF_MSG_TX_QUEUED,
3927 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
3928 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
3929 le16_to_cpu(tx_data_bd->nbytes));
3932 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
3934 /* update with actual num BDs */
3935 first_bd->nbd = cpu_to_le16(nbd);
3937 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3939 /* now send a tx doorbell, counting the next BD
3940 * if the packet contains or ends with it
3942 if (TX_BD_POFF(bd_prod) < nbd)
3945 /* total_pkt_bytes should be set on the first data BD if
3946 * it's not an LSO packet and there is more than one
3947 * data BD. In this case pkt_size is limited by an MTU value.
3948 * However we prefer to set it for an LSO packet (while we don't
3949 * have to) in order to save some CPU cycles in the non-LSO
3950 * case, when we care about them much more.
3952 if (total_pkt_bd != NULL)
3953 total_pkt_bd->total_pkt_bytes = pkt_size;
3956 DP(NETIF_MSG_TX_QUEUED,
3957 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
3958 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
3959 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
3960 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
3961 le16_to_cpu(pbd_e1x->total_hlen_w));
3963 DP(NETIF_MSG_TX_QUEUED,
3964 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
3966 pbd_e2->data.mac_addr.dst_hi,
3967 pbd_e2->data.mac_addr.dst_mid,
3968 pbd_e2->data.mac_addr.dst_lo,
3969 pbd_e2->data.mac_addr.src_hi,
3970 pbd_e2->data.mac_addr.src_mid,
3971 pbd_e2->data.mac_addr.src_lo,
3972 pbd_e2->parsing_data);
3973 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
3975 netdev_tx_sent_queue(txq, skb->len);
3977 skb_tx_timestamp(skb);
3979 txdata->tx_pkt_prod++;
3981 * Make sure that the BD data is updated before updating the producer
3982 * since FW might read the BD right after the producer is updated.
3983 * This is only applicable for weak-ordered memory model archs such
3984 * as IA-64. The following barrier is also mandatory since FW
3985 * assumes packets must have BDs.
3989 txdata->tx_db.data.prod += nbd;
3992 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
3996 txdata->tx_bd_prod += nbd;
3998 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
3999 netif_tx_stop_queue(txq);
4001 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
4002 * ordering of set_bit() in netif_tx_stop_queue() and read of
4006 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
4007 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
4008 netif_tx_wake_queue(txq);
4012 return NETDEV_TX_OK;
4016 * bnx2x_setup_tc - routine to configure net_device for multi tc
4018 * @netdev: net device to configure
4019 * @tc: number of traffic classes to enable
4021 * callback connected to the ndo_setup_tc function pointer
4023 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
4025 int cos, prio, count, offset;
4026 struct bnx2x *bp = netdev_priv(dev);
4028 /* setup tc must be called under rtnl lock */
4031 /* no traffic classes requested. Aborting */
4033 netdev_reset_tc(dev);
4037 /* requested to support too many traffic classes */
4038 if (num_tc > bp->max_cos) {
4039 BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
4040 num_tc, bp->max_cos);
4044 /* declare amount of supported traffic classes */
4045 if (netdev_set_num_tc(dev, num_tc)) {
4046 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
4050 /* configure priority to traffic class mapping */
4051 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
4052 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
4053 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4054 "mapping priority %d to tc %d\n",
4055 prio, bp->prio_to_cos[prio]);
4058 /* Use this configuration to differentiate tc0 from other COSes
4059 This can be used for ets or pfc, and save the effort of setting
4060 up a multi class queue disc or negotiating DCBX with a switch
4061 netdev_set_prio_tc_map(dev, 0, 0);
4062 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
4063 for (prio = 1; prio < 16; prio++) {
4064 netdev_set_prio_tc_map(dev, prio, 1);
4065 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
4068 /* configure traffic class to transmission queue mapping */
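/* For example (hypothetical configuration): with 4 ETH queues (and no CNIC
 * queues) and num_tc = 3, tc0 maps to txq 0-3, tc1 to txq 4-7 and tc2 to
 * txq 8-11, since each traffic class gets a block of
 * BNX2X_NUM_ETH_QUEUES(bp) queues starting at
 * cos * BNX2X_NUM_NON_CNIC_QUEUES(bp).
 */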
4069 for (cos = 0; cos < bp->max_cos; cos++) {
4070 count = BNX2X_NUM_ETH_QUEUES(bp);
4071 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
4072 netdev_set_tc_queue(dev, cos, count, offset);
4073 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4074 "mapping tc %d to offset %d count %d\n",
4075 cos, offset, count);
4081 /* called with rtnl_lock */
4082 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4084 struct sockaddr *addr = p;
4085 struct bnx2x *bp = netdev_priv(dev);
4088 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
4089 BNX2X_ERR("Requested MAC address is not valid\n");
4093 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
4094 !is_zero_ether_addr(addr->sa_data)) {
4095 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
4099 if (netif_running(dev)) {
4100 rc = bnx2x_set_eth_mac(bp, false);
4105 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4107 if (netif_running(dev))
4108 rc = bnx2x_set_eth_mac(bp, true);
4113 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4115 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4116 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
4121 if (IS_FCOE_IDX(fp_index)) {
4122 memset(sb, 0, sizeof(union host_hc_status_block));
4123 fp->status_blk_mapping = 0;
4126 if (!CHIP_IS_E1x(bp))
4127 BNX2X_PCI_FREE(sb->e2_sb,
4128 bnx2x_fp(bp, fp_index,
4129 status_blk_mapping),
4130 sizeof(struct host_hc_status_block_e2));
4132 BNX2X_PCI_FREE(sb->e1x_sb,
4133 bnx2x_fp(bp, fp_index,
4134 status_blk_mapping),
4135 sizeof(struct host_hc_status_block_e1x));
4139 if (!skip_rx_queue(bp, fp_index)) {
4140 bnx2x_free_rx_bds(fp);
4142 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4143 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4144 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4145 bnx2x_fp(bp, fp_index, rx_desc_mapping),
4146 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4148 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4149 bnx2x_fp(bp, fp_index, rx_comp_mapping),
4150 sizeof(struct eth_fast_path_rx_cqe) *
4154 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4155 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4156 bnx2x_fp(bp, fp_index, rx_sge_mapping),
4157 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4161 if (!skip_tx_queue(bp, fp_index)) {
4162 /* fastpath tx rings: tx_buf tx_desc */
4163 for_each_cos_in_tx_queue(fp, cos) {
4164 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4166 DP(NETIF_MSG_IFDOWN,
4167 "freeing tx memory of fp %d cos %d cid %d\n",
4168 fp_index, cos, txdata->cid);
4170 BNX2X_FREE(txdata->tx_buf_ring);
4171 BNX2X_PCI_FREE(txdata->tx_desc_ring,
4172 txdata->tx_desc_mapping,
4173 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4176 /* end of fastpath */
4179 void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
4182 for_each_cnic_queue(bp, i)
4183 bnx2x_free_fp_mem_at(bp, i);
4186 void bnx2x_free_fp_mem(struct bnx2x *bp)
4189 for_each_eth_queue(bp, i)
4190 bnx2x_free_fp_mem_at(bp, i);
4193 static void set_sb_shortcuts(struct bnx2x *bp, int index)
4195 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
4196 if (!CHIP_IS_E1x(bp)) {
4197 bnx2x_fp(bp, index, sb_index_values) =
4198 (__le16 *)status_blk.e2_sb->sb.index_values;
4199 bnx2x_fp(bp, index, sb_running_index) =
4200 (__le16 *)status_blk.e2_sb->sb.running_index;
4202 bnx2x_fp(bp, index, sb_index_values) =
4203 (__le16 *)status_blk.e1x_sb->sb.index_values;
4204 bnx2x_fp(bp, index, sb_running_index) =
4205 (__le16 *)status_blk.e1x_sb->sb.running_index;
4209 /* Returns the number of actually allocated BDs */
4210 static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4213 struct bnx2x *bp = fp->bp;
4214 u16 ring_prod, cqe_ring_prod;
4215 int i, failure_cnt = 0;
4217 fp->rx_comp_cons = 0;
4218 cqe_ring_prod = ring_prod = 0;
4220 /* This routine is called only during fp init so
4221 * fp->eth_q_stats.rx_skb_alloc_failed = 0
4223 for (i = 0; i < rx_ring_size; i++) {
4224 if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
4228 ring_prod = NEXT_RX_IDX(ring_prod);
4229 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4230 WARN_ON(ring_prod <= (i - failure_cnt));
4234 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4235 i - failure_cnt, fp->index);
4237 fp->rx_bd_prod = ring_prod;
4238 /* Limit the CQE producer by the CQE ring size */
4239 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4241 fp->rx_pkt = fp->rx_calls = 0;
4243 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
4245 return i - failure_cnt;
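/* The caller compares this count against the requested ring size; if the
 * shortfall leaves fewer buffers than the FW minimum, the queue's memory is
 * released and the queue is disabled (see bnx2x_alloc_fp_mem_at()).
 */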
4248 static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4252 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4253 struct eth_rx_cqe_next_page *nextpg;
4255 nextpg = (struct eth_rx_cqe_next_page *)
4256 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4258 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4259 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4261 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4262 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
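/* Each RCQ page therefore ends with a "next page" CQE pointing at the
 * following page, and the last page wraps back to page 0 thanks to the
 * (i % NUM_RCQ_RINGS) term, forming a circular completion ring.
 */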
4266 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4268 union host_hc_status_block *sb;
4269 struct bnx2x_fastpath *fp = &bp->fp[index];
4272 int rx_ring_size = 0;
4274 if (!bp->rx_ring_size &&
4275 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
4276 rx_ring_size = MIN_RX_SIZE_NONTPA;
4277 bp->rx_ring_size = rx_ring_size;
4278 } else if (!bp->rx_ring_size) {
4279 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4281 if (CHIP_IS_E3(bp)) {
4282 u32 cfg = SHMEM_RD(bp,
4283 dev_info.port_hw_config[BP_PORT(bp)].
4286 /* Decrease ring size for 1G functions */
4287 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4288 PORT_HW_CFG_NET_SERDES_IF_SGMII)
4292 /* allocate at least the number of buffers required by FW */
4293 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4294 MIN_RX_SIZE_TPA, rx_ring_size);
4296 bp->rx_ring_size = rx_ring_size;
4297 } else /* if rx_ring_size specified - use it */
4298 rx_ring_size = bp->rx_ring_size;
4300 DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
4303 sb = &bnx2x_fp(bp, index, status_blk);
4305 if (!IS_FCOE_IDX(index)) {
4307 if (!CHIP_IS_E1x(bp))
4308 BNX2X_PCI_ALLOC(sb->e2_sb,
4309 &bnx2x_fp(bp, index, status_blk_mapping),
4310 sizeof(struct host_hc_status_block_e2));
4312 BNX2X_PCI_ALLOC(sb->e1x_sb,
4313 &bnx2x_fp(bp, index, status_blk_mapping),
4314 sizeof(struct host_hc_status_block_e1x));
4317 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4318 * set shortcuts for it.
4320 if (!IS_FCOE_IDX(index))
4321 set_sb_shortcuts(bp, index);
4324 if (!skip_tx_queue(bp, index)) {
4325 /* fastpath tx rings: tx_buf tx_desc */
4326 for_each_cos_in_tx_queue(fp, cos) {
4327 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4330 "allocating tx memory of fp %d cos %d\n",
4333 BNX2X_ALLOC(txdata->tx_buf_ring,
4334 sizeof(struct sw_tx_bd) * NUM_TX_BD);
4335 BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
4336 &txdata->tx_desc_mapping,
4337 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4342 if (!skip_rx_queue(bp, index)) {
4343 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4344 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
4345 sizeof(struct sw_rx_bd) * NUM_RX_BD);
4346 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
4347 &bnx2x_fp(bp, index, rx_desc_mapping),
4348 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4350 /* Seed all CQEs by 1s */
4351 BNX2X_PCI_FALLOC(bnx2x_fp(bp, index, rx_comp_ring),
4352 &bnx2x_fp(bp, index, rx_comp_mapping),
4353 sizeof(struct eth_fast_path_rx_cqe) *
4357 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
4358 sizeof(struct sw_rx_page) * NUM_RX_SGE);
4359 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
4360 &bnx2x_fp(bp, index, rx_sge_mapping),
4361 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4363 bnx2x_set_next_page_rx_bd(fp);
4366 bnx2x_set_next_page_rx_cq(fp);
4369 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4370 if (ring_size < rx_ring_size)
4376 /* handles low memory cases */
4378 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4380 /* FW will drop all packets if the queue is not big enough.
4381 * In these cases we disable the queue.
4382 * Min size is different for OOO, TPA and non-TPA queues.
4384 if (ring_size < (fp->disable_tpa ?
4385 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
4386 /* release memory allocated for this queue */
4387 bnx2x_free_fp_mem_at(bp, index);
4393 int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
4397 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
4398 /* we will fail the load process instead of marking NO_FCOE_FLAG */
4406 int bnx2x_alloc_fp_mem(struct bnx2x *bp)
4410 /* 1. Allocate FP for leading - fatal if error
4411 * 2. Allocate RSS - fix number of queues if error
4415 if (bnx2x_alloc_fp_mem_at(bp, 0))
4419 for_each_nondefault_eth_queue(bp, i)
4420 if (bnx2x_alloc_fp_mem_at(bp, i))
4423 /* handle memory failures */
4424 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4425 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4428 bnx2x_shrink_eth_fp(bp, delta);
4429 if (CNIC_SUPPORT(bp))
4430 /* move non-eth FPs next to the last eth FP;
4431 * must be done in that order:
4432 * FCOE_IDX < FWD_IDX < OOO_IDX
4435 /* move FCoE fp even if NO_FCOE_FLAG is on */
4436 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4437 bp->num_ethernet_queues -= delta;
4438 bp->num_queues = bp->num_ethernet_queues +
4439 bp->num_cnic_queues;
4440 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4441 bp->num_queues + delta, bp->num_queues);
4447 void bnx2x_free_mem_bp(struct bnx2x *bp)
4451 for (i = 0; i < bp->fp_array_size; i++)
4452 kfree(bp->fp[i].tpa_info);
4455 kfree(bp->fp_stats);
4456 kfree(bp->bnx2x_txq);
4457 kfree(bp->msix_table);
int bnx2x_alloc_mem_bp(struct bnx2x *bp)
{
	struct bnx2x_fastpath *fp;
	struct msix_entry *tbl;
	struct bnx2x_ilt *ilt;
	int msix_table_size = 0;
	int fp_array_size, txq_array_size;
	int i;

	/*
	 * The biggest MSI-X table we might need is as a maximum number of fast
	 * path IGU SBs plus default SB (for PF only).
	 */
	msix_table_size = bp->igu_sb_cnt;
	if (IS_PF(bp))
		msix_table_size++;
	BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);

	/* fp array: RSS plus CNIC related L2 queues */
	fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
	bp->fp_array_size = fp_array_size;
	BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);

	fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
	if (!fp)
		goto alloc_err;
	for (i = 0; i < bp->fp_array_size; i++) {
		fp[i].tpa_info =
			kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
				sizeof(struct bnx2x_agg_info), GFP_KERNEL);
		if (!(fp[i].tpa_info))
			goto alloc_err;
	}

	bp->fp = fp;

	/* allocate sp objs */
	bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
			      GFP_KERNEL);
	if (!bp->sp_objs)
		goto alloc_err;

	/* allocate fp_stats */
	bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
			       GFP_KERNEL);
	if (!bp->fp_stats)
		goto alloc_err;

	/* Allocate memory for the transmission queues array */
	txq_array_size =
		BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
	BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);

	bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
				GFP_KERNEL);
	if (!bp->bnx2x_txq)
		goto alloc_err;

	/* msix table */
	tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
	if (!tbl)
		goto alloc_err;
	bp->msix_table = tbl;

	/* ilt */
	ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
	if (!ilt)
		goto alloc_err;
	bp->ilt = ilt;

	return 0;
alloc_err:
	bnx2x_free_mem_bp(bp);
	return -ENOMEM;
}
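/* Illustrative sizing sketch (hypothetical numbers, not taken from a real
 * configuration): with BNX2X_MAX_RSS_COUNT(bp) == 16, CNIC_SUPPORT(bp) == 1
 * and BNX2X_MULTI_TX_COS == 3, the arrays above would be sized as
 *
 *	fp_array_size  = 16 + 1     = 17  fastpath structures
 *	txq_array_size = 16 * 3 + 1 = 49  txdata structures
 *
 * i.e. one txdata per traffic class per RSS queue, plus a single txdata for
 * the CNIC (FCoE) L2 queue, which mirrors the txdata indexing used by
 * bnx2x_move_fp() when queues are shrunk.
 */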
int bnx2x_reload_if_running(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (unlikely(!netif_running(dev)))
		return 0;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
	return bnx2x_nic_load(bp, LOAD_NORMAL);
}
int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = 0;
	if (bp->link_params.num_phys <= 1)
		return INT_PHY;

	if (bp->link_vars.link_up) {
		sel_phy_idx = EXT_PHY1;
		/* In case link is SERDES, check if the EXT_PHY2 is the one */
		if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
		    (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
			sel_phy_idx = EXT_PHY2;
	} else {
		switch (bnx2x_phy_selection(&bp->link_params)) {
		case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY1;
			break;
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY2;
			break;
		}
	}

	return sel_phy_idx;
}
int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
	/*
	 * The selected activated PHY is always after swapping (in case PHY
	 * swapping is enabled). So when swapping is enabled, we need to reverse
	 * the configuration
	 */

	if (bp->link_params.multi_phy_config &
	    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
		if (sel_phy_idx == EXT_PHY1)
			sel_phy_idx = EXT_PHY2;
		else if (sel_phy_idx == EXT_PHY2)
			sel_phy_idx = EXT_PHY1;
	}
	return LINK_CONFIG_IDX(sel_phy_idx);
}
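/* Illustrative sketch (hypothetical values, not part of the driver): with PHY
 * swapping enabled, the index returned by bnx2x_get_cur_phy_idx() refers to
 * the post-swap slot, so it is reversed before being turned into a
 * link-configuration index, e.g.
 *
 *	cur = EXT_PHY1;                          // active PHY after swapping
 *	cfg_idx = LINK_CONFIG_IDX(EXT_PHY2);     // configuration of the pre-swap slot
 *
 * so the configuration that gets read always belongs to the slot the PHY
 * occupied before swapping.
 */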
#ifdef NETDEV_FCOE_WWNN
int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	switch (type) {
	case NETDEV_FCOE_WWNN:
		*wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
				cp->fcoe_wwn_node_name_lo);
		break;
	case NETDEV_FCOE_WWPN:
		*wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
				cp->fcoe_wwn_port_name_lo);
		break;
	default:
		BNX2X_ERR("Wrong WWN type requested - %d\n", type);
		return -EINVAL;
	}

	return 0;
}
#endif
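/* A minimal sketch of what the HILO_U64() composition above amounts to
 * (assuming the conventional hi/lo split; this is not the driver's macro
 * definition itself):
 *
 *	u64 wwn = ((u64)wwn_hi << 32) | wwn_lo;
 *
 * i.e. the two 32-bit halves exposed by the CNIC interface are joined into a
 * single 64-bit World Wide Name before being handed back to the FCoE stack.
 */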
/* called with rtnl_lock */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		BNX2X_ERR("Can't perform change MTU during parity recovery\n");
		return -EAGAIN;
	}

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
		BNX2X_ERR("Can't support requested MTU size\n");
		return -EINVAL;
	}

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	return bnx2x_reload_if_running(dev);
}
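/* Illustrative bounds check (hypothetical value for ETH_MIN_PACKET_SIZE, not
 * taken from the driver's headers): if ETH_MIN_PACKET_SIZE were 60 and
 * ETH_HLEN is 14, the smallest MTU accepted above would be
 *
 *	new_mtu >= ETH_MIN_PACKET_SIZE - ETH_HLEN   ->   new_mtu >= 46
 *
 * while the upper bound is ETH_MAX_JUMBO_PACKET_SIZE; anything outside this
 * window is rejected with -EINVAL before the device is reloaded.
 */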
netdev_features_t bnx2x_fix_features(struct net_device *dev,
				     netdev_features_t features)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* TPA requires Rx CSUM offloading */
	if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
		features &= ~NETIF_F_LRO;
		features &= ~NETIF_F_GRO;
	}

	return features;
}
int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 flags = bp->flags;
	u32 changes;
	bool bnx2x_reload = false;

	if (features & NETIF_F_LRO)
		flags |= TPA_ENABLE_FLAG;
	else
		flags &= ~TPA_ENABLE_FLAG;

	if (features & NETIF_F_GRO)
		flags |= GRO_ENABLE_FLAG;
	else
		flags &= ~GRO_ENABLE_FLAG;

	if (features & NETIF_F_LOOPBACK) {
		if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
			bp->link_params.loopback_mode = LOOPBACK_BMAC;
			bnx2x_reload = true;
		}
	} else {
		if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
			bp->link_params.loopback_mode = LOOPBACK_NONE;
			bnx2x_reload = true;
		}
	}

	changes = flags ^ bp->flags;

	/* if GRO is changed while LRO is enabled, don't force a reload */
	if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG))
		changes &= ~GRO_ENABLE_FLAG;

	if (changes)
		bnx2x_reload = true;

	bp->flags = flags;

	if (bnx2x_reload) {
		if (bp->recovery_state == BNX2X_RECOVERY_DONE)
			return bnx2x_reload_if_running(dev);
		/* else: bnx2x_nic_load() will be called at end of recovery */
	}

	return 0;
}
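/* Illustrative sketch of the reload decision above (plain values, not driver
 * state): suppose only GRO is being toggled while LRO/TPA stays enabled:
 *
 *	u32 old_flags = TPA_ENABLE_FLAG | GRO_ENABLE_FLAG;
 *	u32 new_flags = TPA_ENABLE_FLAG;          // GRO turned off
 *	u32 changes   = new_flags ^ old_flags;    // == GRO_ENABLE_FLAG
 *
 *	if ((changes & GRO_ENABLE_FLAG) && (new_flags & TPA_ENABLE_FLAG))
 *		changes &= ~GRO_ENABLE_FLAG;      // now 0 -> no reload
 *
 * Software GRO only takes effect when TPA/LRO is off, so flipping it while
 * TPA is active does not require tearing the NIC down and reloading it.
 */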
void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif

	smp_mb__before_clear_bit();
	set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
	smp_mb__after_clear_bit();

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_delayed_work(&bp->sp_rtnl_task, 0);
}
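/* The barrier/set_bit/barrier pattern above is the usual "post an event for a
 * worker" idiom: the memory barriers ensure that everything written before the
 * timeout was detected is visible to the sp_rtnl worker by the time it
 * observes BNX2X_SP_RTNL_TX_TIMEOUT, and the actual reset then runs from
 * process context under rtnl rather than from the watchdog itself.
 */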
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}
int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		BNX2X_ERR("Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}
void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
			      u32 cid)
{
	if (!cxt) {
		BNX2X_ERR("bad context pointer %p\n", cxt);
		return;
	}

	/* ustorm cxt validation */
	cxt->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
			CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
	/* xcontext validation */
	cxt->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
			CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
}
static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
				    u8 fw_sb_id, u8 sb_index,
				    u8 ticks)
{
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
	REG_WR8(bp, addr, ticks);
	DP(NETIF_MSG_IFUP,
	   "port %x fw_sb_id %d sb_index %d ticks %d\n",
	   port, fw_sb_id, sb_index, ticks);
}
static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
				    u16 fw_sb_id, u8 sb_index,
				    u8 disable)
{
	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
	u8 flags = REG_RD8(bp, addr);

	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= enable_flag;
	REG_WR8(bp, addr, flags);
	DP(NETIF_MSG_IFUP,
	   "port %x fw_sb_id %d sb_index %d disable %d\n",
	   port, fw_sb_id, sb_index, disable);
}
void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
				    u8 sb_index, u8 disable, u16 usec)
{
	int port = BP_PORT(bp);
	u8 ticks = usec / BNX2X_BTR;

	storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);

	disable = disable ? 1 : (usec ? 0 : 1);
	storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
}
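/* Illustrative sketch of the conversion above (hypothetical BNX2X_BTR value,
 * not taken from the driver's headers): if one host-coalescing tick were
 * BNX2X_BTR == 4 microseconds, then
 *
 *	usec = 100  ->  ticks = 100 / 4 = 25
 *	usec = 0    ->  ticks = 0 and disable is forced to 1
 *
 * i.e. asking for a zero-microsecond interval disables status-block
 * coalescing for that index rather than programming a zero timeout.
 */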