1 /* bnx2x_cmn.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2013 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <linux/etherdevice.h>
21 #include <linux/if_vlan.h>
22 #include <linux/interrupt.h>
26 #include <net/ip6_checksum.h>
27 #include <net/busy_poll.h>
28 #include <linux/prefetch.h>
29 #include "bnx2x_cmn.h"
30 #include "bnx2x_init.h"
34 * bnx2x_move_fp - move content of the fastpath structure.
37 * @from: source FP index
38 * @to: destination FP index
40 * Makes sure the content of bp->fp[to].napi is kept
41 * intact. This is done by first copying the napi struct from
42 * the target to the source, and then memcpying the entire
43 * source onto the target. Update txdata pointers and related content.
46 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
48 struct bnx2x_fastpath *from_fp = &bp->fp[from];
49 struct bnx2x_fastpath *to_fp = &bp->fp[to];
50 struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
51 struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
52 struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
53 struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
54 int old_max_eth_txqs, new_max_eth_txqs;
55 int old_txdata_index = 0, new_txdata_index = 0;
56 struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;
58 /* Copy the NAPI object as it has been already initialized */
59 from_fp->napi = to_fp->napi;
61 /* Move bnx2x_fastpath contents */
62 memcpy(to_fp, from_fp, sizeof(*to_fp));
65 /* Retain the tpa_info of the original `to' version as we don't want
66 * 2 FPs to contain the same tpa_info pointer.
68 to_fp->tpa_info = old_tpa_info;
70 /* move sp_objs contents as well, as their indices match fp ones */
71 memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
73 /* move fp_stats contents as well, as their indices match fp ones */
74 memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
76 /* Update txdata pointers in fp and move txdata content accordingly:
77 * Each fp consumes 'max_cos' txdata structures, so the index should be
78 * decremented by max_cos x delta.
81 old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
82 new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
84 if (from == FCOE_IDX(bp)) {
85 old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
86 new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
89 memcpy(&bp->bnx2x_txq[new_txdata_index],
90 &bp->bnx2x_txq[old_txdata_index],
91 sizeof(struct bnx2x_fp_txdata));
92 to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
96 * bnx2x_fill_fw_str - Fill buffer with FW version string.
99 * @buf: character buffer to fill with the fw name
100 * @buf_len: length of the above buffer
103 void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
106 u8 phy_fw_ver[PHY_FW_VER_LEN];
108 phy_fw_ver[0] = '\0';
109 bnx2x_get_ext_phy_fw_version(&bp->link_params,
110 phy_fw_ver, PHY_FW_VER_LEN);
111 strlcpy(buf, bp->fw_ver, buf_len);
112 snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
114 (bp->common.bc_ver & 0xff0000) >> 16,
115 (bp->common.bc_ver & 0xff00) >> 8,
116 (bp->common.bc_ver & 0xff),
117 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
119 bnx2x_vf_fill_fw_str(bp, buf, buf_len);
124 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
127 * @delta: number of eth queues which were not allocated
129 static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
131 int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
133 /* Queue pointer cannot be re-set on an fp-basis, as moving pointer
134 * backward along the array could cause memory to be overwritten
136 for (cos = 1; cos < bp->max_cos; cos++) {
137 for (i = 0; i < old_eth_num - delta; i++) {
138 struct bnx2x_fastpath *fp = &bp->fp[i];
139 int new_idx = cos * (old_eth_num - delta) + i;
141 memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
142 sizeof(struct bnx2x_fp_txdata));
143 fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
148 int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
150 /* free skb in the packet ring at pos idx
151 * return idx of last bd freed
153 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
154 u16 idx, unsigned int *pkts_compl,
155 unsigned int *bytes_compl)
157 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
158 struct eth_tx_start_bd *tx_start_bd;
159 struct eth_tx_bd *tx_data_bd;
160 struct sk_buff *skb = tx_buf->skb;
161 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
164 /* prefetch skb end pointer to speedup dev_kfree_skb() */
167 DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
168 txdata->txq_index, idx, tx_buf, skb);
171 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
172 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
173 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
175 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
176 #ifdef BNX2X_STOP_ON_ERROR
177 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
178 BNX2X_ERR("BAD nbd!\n");
182 new_cons = nbd + tx_buf->first_bd;
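/* new_cons: the BD consumer index to report once all of this
 * packet's BDs have been freed.
 */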
184 /* Get the next bd */
185 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
187 /* Skip a parse bd... */
189 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
191 /* ...and the TSO split header bd since they have no mapping */
192 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
194 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
200 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
201 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
202 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
204 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
211 (*bytes_compl) += skb->len;
214 dev_kfree_skb_any(skb);
215 tx_buf->first_bd = 0;
221 int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
223 struct netdev_queue *txq;
224 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
225 unsigned int pkts_compl = 0, bytes_compl = 0;
227 #ifdef BNX2X_STOP_ON_ERROR
228 if (unlikely(bp->panic))
232 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
233 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
234 sw_cons = txdata->tx_pkt_cons;
236 while (sw_cons != hw_cons) {
239 pkt_cons = TX_BD(sw_cons);
241 DP(NETIF_MSG_TX_DONE,
242 "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
243 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
245 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
246 &pkts_compl, &bytes_compl);
251 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
253 txdata->tx_pkt_cons = sw_cons;
254 txdata->tx_bd_cons = bd_cons;
256 /* Need to make the tx_bd_cons update visible to start_xmit()
257 * before checking for netif_tx_queue_stopped(). Without the
258 * memory barrier, there is a small possibility that
259 * start_xmit() will miss it and cause the queue to be stopped
261 * On the other hand we need an rmb() here to ensure the proper
262 * ordering of bit testing in the following
263 * netif_tx_queue_stopped(txq) call.
267 if (unlikely(netif_tx_queue_stopped(txq))) {
268 /* Taking tx_lock() is needed to prevent re-enabling the queue
269 * while it's empty. This could happen if rx_action() gets
270 * suspended in bnx2x_tx_int() after the condition before
271 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
273 * stops the queue->sees fresh tx_bd_cons->releases the queue->
274 * sends some packets consuming the whole queue again->
278 __netif_tx_lock(txq, smp_processor_id());
280 if ((netif_tx_queue_stopped(txq)) &&
281 (bp->state == BNX2X_STATE_OPEN) &&
282 (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
283 netif_tx_wake_queue(txq);
285 __netif_tx_unlock(txq);
290 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
293 u16 last_max = fp->last_max_sge;
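/* SUB_S16() is a signed 16-bit subtraction, so the comparison stays
 * correct across index wrap-around.
 */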
295 if (SUB_S16(idx, last_max) > 0)
296 fp->last_max_sge = idx;
299 static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
301 struct eth_end_agg_rx_cqe *cqe)
303 struct bnx2x *bp = fp->bp;
304 u16 last_max, last_elem, first_elem;
311 /* First mark all used pages */
312 for (i = 0; i < sge_len; i++)
313 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
314 RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
316 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
317 sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
319 /* Here we assume that the last SGE index is the biggest */
320 prefetch((void *)(fp->sge_mask));
321 bnx2x_update_last_max_sge(fp,
322 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
324 last_max = RX_SGE(fp->last_max_sge);
325 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
326 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
328 /* If ring is not full */
329 if (last_elem + 1 != first_elem)
332 /* Now update the prod */
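/* Walk the 64-bit mask elements from the current producer up to the
 * last used one: each fully-consumed element is re-armed and the
 * producer is credited with BIT_VEC64_ELEM_SZ SGEs; stop at the first
 * element that still has outstanding SGEs.
 */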
333 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
334 if (likely(fp->sge_mask[i]))
337 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
338 delta += BIT_VEC64_ELEM_SZ;
342 fp->rx_sge_prod += delta;
343 /* clear page-end entries */
344 bnx2x_clear_sge_mask_next_elems(fp);
347 DP(NETIF_MSG_RX_STATUS,
348 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
349 fp->last_max_sge, fp->rx_sge_prod);
352 /* Get the Toeplitz hash value for the skb from the
353 * CQE (calculated by HW).
355 static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
356 const struct eth_fast_path_rx_cqe *cqe,
359 /* Get Toeplitz hash from CQE */
360 if ((bp->dev->features & NETIF_F_RXHASH) &&
361 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
362 enum eth_rss_hash_type htype;
364 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
365 *l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) ||
366 (htype == TCP_IPV6_HASH_TYPE);
367 return le32_to_cpu(cqe->rss_hash_result);
373 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
375 struct eth_fast_path_rx_cqe *cqe)
377 struct bnx2x *bp = fp->bp;
378 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
379 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
380 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
382 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
383 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
385 /* print error if current state != stop */
386 if (tpa_info->tpa_state != BNX2X_TPA_STOP)
387 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
389 /* Try to map an empty data buffer from the aggregation info */
390 mapping = dma_map_single(&bp->pdev->dev,
391 first_buf->data + NET_SKB_PAD,
392 fp->rx_buf_size, DMA_FROM_DEVICE);
394 * ...if it fails - move the skb from the consumer to the producer
395 * and set the current aggregation state as ERROR to drop it
396 * when TPA_STOP arrives.
399 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
400 /* Move the BD from the consumer to the producer */
401 bnx2x_reuse_rx_data(fp, cons, prod);
402 tpa_info->tpa_state = BNX2X_TPA_ERROR;
406 /* move empty data from pool to prod */
407 prod_rx_buf->data = first_buf->data;
408 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
409 /* point prod_bd to new data */
410 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
411 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
413 /* move partial skb from cons to pool (don't unmap yet) */
414 *first_buf = *cons_rx_buf;
416 /* mark bin state as START */
417 tpa_info->parsing_flags =
418 le16_to_cpu(cqe->pars_flags.flags);
419 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
420 tpa_info->tpa_state = BNX2X_TPA_START;
421 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
422 tpa_info->placement_offset = cqe->placement_offset;
423 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
424 if (fp->mode == TPA_MODE_GRO) {
425 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
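/* full_page: SGE_PAGES rounded down to a whole multiple of gro_size,
 * so each SGE page is split into complete MSS-sized frags.
 */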
426 tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
427 tpa_info->gro_size = gro_size;
430 #ifdef BNX2X_STOP_ON_ERROR
431 fp->tpa_queue_used |= (1 << queue);
432 #ifdef _ASM_GENERIC_INT_L64_H
433 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
435 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
441 /* Timestamp option length allowed for TPA aggregation:
443 * nop nop kind length echo val
445 #define TPA_TSTAMP_OPT_LEN 12
447 * bnx2x_set_gro_params - compute GRO values
450 * @parsing_flags: parsing flags from the START CQE
451 * @len_on_bd: total length of the first packet for the aggregation
453 * @pkt_len: length of all segments
455 * Approximates the MSS for this aggregation using its
456 * first packet.
457 * Also computes the number of aggregated segments and the gso_type.
459 static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
460 u16 len_on_bd, unsigned int pkt_len,
461 u16 num_of_coalesced_segs)
463 /* TPA aggregation won't have either IP options or TCP options
464 * other than timestamp or IPv6 extension headers.
466 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
468 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
469 PRS_FLAG_OVERETH_IPV6) {
470 hdrs_len += sizeof(struct ipv6hdr);
471 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
473 hdrs_len += sizeof(struct iphdr);
474 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
477 /* Check if there was a TCP timestamp; if there was, it will
478 * always be 12 bytes long: nop nop kind length echo val.
480 * Otherwise FW would close the aggregation.
482 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
483 hdrs_len += TPA_TSTAMP_OPT_LEN;
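/* For example, an IPv4 aggregation with TCP timestamps gives
 * hdrs_len = 14 (ETH) + 20 (IP) + 20 (TCP) + 12 (tstamp) = 66,
 * so gso_size below becomes len_on_bd - 66.
 */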
485 skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
487 /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
488 * to skb_shinfo(skb)->gso_segs
490 NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
493 static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
494 u16 index, gfp_t gfp_mask)
496 struct page *page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
497 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
498 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
501 if (unlikely(page == NULL)) {
502 BNX2X_ERR("Can't alloc sge\n");
506 mapping = dma_map_page(&bp->pdev->dev, page, 0,
507 SGE_PAGES, DMA_FROM_DEVICE);
508 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
509 __free_pages(page, PAGES_PER_SGE_SHIFT);
510 BNX2X_ERR("Can't map sge\n");
515 dma_unmap_addr_set(sw_buf, mapping, mapping);
517 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
518 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
523 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
524 struct bnx2x_agg_info *tpa_info,
527 struct eth_end_agg_rx_cqe *cqe,
530 struct sw_rx_page *rx_pg, old_rx_pg;
531 u32 i, frag_len, frag_size;
532 int err, j, frag_id = 0;
533 u16 len_on_bd = tpa_info->len_on_bd;
534 u16 full_page = 0, gro_size = 0;
536 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
538 if (fp->mode == TPA_MODE_GRO) {
539 gro_size = tpa_info->gro_size;
540 full_page = tpa_info->full_page;
543 /* This is needed in order to enable forwarding support */
545 bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
546 le16_to_cpu(cqe->pkt_len),
547 le16_to_cpu(cqe->num_of_coalesced_segs));
549 #ifdef BNX2X_STOP_ON_ERROR
550 if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
551 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
553 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
559 /* Run through the SGL and compose the fragmented skb */
560 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
561 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
563 /* FW gives the indices of the SGE as if the ring is an array
564 (meaning that "next" element will consume 2 indices) */
565 if (fp->mode == TPA_MODE_GRO)
566 frag_len = min_t(u32, frag_size, (u32)full_page);
568 frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);
570 rx_pg = &fp->rx_page_ring[sge_idx];
573 /* If we fail to allocate a substitute page, we simply stop
574 where we are and drop the whole packet */
575 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
577 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
581 /* Unmap the page as we're going to pass it to the stack */
582 dma_unmap_page(&bp->pdev->dev,
583 dma_unmap_addr(&old_rx_pg, mapping),
584 SGE_PAGES, DMA_FROM_DEVICE);
585 /* Add one frag and update the appropriate fields in the skb */
586 if (fp->mode == TPA_MODE_LRO)
587 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
591 for (rem = frag_len; rem > 0; rem -= gro_size) {
592 int len = rem > gro_size ? gro_size : rem;
593 skb_fill_page_desc(skb, frag_id++,
594 old_rx_pg.page, offset, len);
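/* every additional frag that points into the same page takes its
 * own reference on that page
 */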
596 get_page(old_rx_pg.page);
601 skb->data_len += frag_len;
602 skb->truesize += SGE_PAGES;
603 skb->len += frag_len;
605 frag_size -= frag_len;
611 static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
613 if (fp->rx_frag_size)
614 put_page(virt_to_head_page(data));
619 static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
621 if (fp->rx_frag_size) {
622 /* GFP_KERNEL allocations are used only during initialization */
623 if (unlikely(gfp_mask & __GFP_WAIT))
624 return (void *)__get_free_page(gfp_mask);
626 return netdev_alloc_frag(fp->rx_frag_size);
629 return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
633 static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
635 const struct iphdr *iph = ip_hdr(skb);
638 skb_set_transport_header(skb, sizeof(struct iphdr));
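/* seed th->check with the complemented TCP pseudo-header checksum
 * before the skb is handed to tcp_gro_complete()
 */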
641 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
642 iph->saddr, iph->daddr, 0);
645 static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
647 struct ipv6hdr *iph = ipv6_hdr(skb);
650 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
653 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
654 &iph->saddr, &iph->daddr, 0);
657 static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
658 void (*gro_func)(struct bnx2x*, struct sk_buff*))
660 skb_set_network_header(skb, 0);
662 tcp_gro_complete(skb);
666 static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
670 if (skb_shinfo(skb)->gso_size) {
671 switch (be16_to_cpu(skb->protocol)) {
673 bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
676 bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
679 BNX2X_ERR("Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
680 be16_to_cpu(skb->protocol));
684 skb_record_rx_queue(skb, fp->rx_queue);
685 napi_gro_receive(&fp->napi, skb);
688 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
689 struct bnx2x_agg_info *tpa_info,
691 struct eth_end_agg_rx_cqe *cqe,
694 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
695 u8 pad = tpa_info->placement_offset;
696 u16 len = tpa_info->len_on_bd;
697 struct sk_buff *skb = NULL;
698 u8 *new_data, *data = rx_buf->data;
699 u8 old_tpa_state = tpa_info->tpa_state;
701 tpa_info->tpa_state = BNX2X_TPA_STOP;
703 /* If there was an error during the handling of the TPA_START,
704 * drop this aggregation.
706 if (old_tpa_state == BNX2X_TPA_ERROR)
709 /* Try to allocate the new data */
710 new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
711 /* Unmap skb in the pool anyway, as we are going to change
712 pool entry status to BNX2X_TPA_STOP even if new skb allocation fails.
714 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
715 fp->rx_buf_size, DMA_FROM_DEVICE);
716 if (likely(new_data))
717 skb = build_skb(data, fp->rx_frag_size);
720 #ifdef BNX2X_STOP_ON_ERROR
721 if (pad + len > fp->rx_buf_size) {
722 BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
723 pad, len, fp->rx_buf_size);
729 skb_reserve(skb, pad + NET_SKB_PAD);
731 skb->rxhash = tpa_info->rxhash;
732 skb->l4_rxhash = tpa_info->l4_rxhash;
734 skb->protocol = eth_type_trans(skb, bp->dev);
735 skb->ip_summed = CHECKSUM_UNNECESSARY;
737 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
738 skb, cqe, cqe_idx)) {
739 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
740 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
741 bnx2x_gro_receive(bp, fp, skb);
743 DP(NETIF_MSG_RX_STATUS,
744 "Failed to allocate new pages - dropping packet!\n");
745 dev_kfree_skb_any(skb);
748 /* put new data in bin */
749 rx_buf->data = new_data;
753 bnx2x_frag_free(fp, new_data);
755 /* drop the packet and keep the buffer in the bin */
756 DP(NETIF_MSG_RX_STATUS,
757 "Failed to allocate or map a new skb - dropping packet!\n");
758 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
761 static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
762 u16 index, gfp_t gfp_mask)
765 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
766 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
769 data = bnx2x_frag_alloc(fp, gfp_mask);
770 if (unlikely(data == NULL))
773 mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
776 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
777 bnx2x_frag_free(fp, data);
778 BNX2X_ERR("Can't map rx data\n");
783 dma_unmap_addr_set(rx_buf, mapping, mapping);
785 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
786 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
792 void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
793 struct bnx2x_fastpath *fp,
794 struct bnx2x_eth_q_stats *qstats)
796 /* Do nothing if no L4 csum validation was done.
797 * We do not check whether IP csum was validated. For IPv4 we assume
798 * that if the card got as far as validating the L4 csum, it also
799 * validated the IP csum. IPv6 has no IP csum.
801 if (cqe->fast_path_cqe.status_flags &
802 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
805 /* If L4 validation was done, check if an error was found. */
807 if (cqe->fast_path_cqe.type_error_flags &
808 (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
809 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
810 qstats->hw_csum_err++;
812 skb->ip_summed = CHECKSUM_UNNECESSARY;
815 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
817 struct bnx2x *bp = fp->bp;
818 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
819 u16 sw_comp_cons, sw_comp_prod;
821 union eth_rx_cqe *cqe;
822 struct eth_fast_path_rx_cqe *cqe_fp;
824 #ifdef BNX2X_STOP_ON_ERROR
825 if (unlikely(bp->panic))
829 bd_cons = fp->rx_bd_cons;
830 bd_prod = fp->rx_bd_prod;
831 bd_prod_fw = bd_prod;
832 sw_comp_cons = fp->rx_comp_cons;
833 sw_comp_prod = fp->rx_comp_prod;
835 comp_ring_cons = RCQ_BD(sw_comp_cons);
836 cqe = &fp->rx_comp_ring[comp_ring_cons];
837 cqe_fp = &cqe->fast_path_cqe;
839 DP(NETIF_MSG_RX_STATUS,
840 "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);
842 while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
843 struct sw_rx_bd *rx_buf = NULL;
846 enum eth_rx_cqe_type cqe_fp_type;
851 #ifdef BNX2X_STOP_ON_ERROR
852 if (unlikely(bp->panic))
856 bd_prod = RX_BD(bd_prod);
857 bd_cons = RX_BD(bd_cons);
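/* wrap the running producer/consumer indices into the RX BD ring range */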
859 cqe_fp_flags = cqe_fp->type_error_flags;
860 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
862 DP(NETIF_MSG_RX_STATUS,
863 "CQE type %x err %x status %x queue %x vlan %x len %u\n",
864 CQE_TYPE(cqe_fp_flags),
865 cqe_fp_flags, cqe_fp->status_flags,
866 le32_to_cpu(cqe_fp->rss_hash_result),
867 le16_to_cpu(cqe_fp->vlan_tag),
868 le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
870 /* is this a slowpath msg? */
871 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
872 bnx2x_sp_event(fp, cqe);
876 rx_buf = &fp->rx_buf_ring[bd_cons];
879 if (!CQE_TYPE_FAST(cqe_fp_type)) {
880 struct bnx2x_agg_info *tpa_info;
881 u16 frag_size, pages;
882 #ifdef BNX2X_STOP_ON_ERROR
884 if (fp->disable_tpa &&
885 (CQE_TYPE_START(cqe_fp_type) ||
886 CQE_TYPE_STOP(cqe_fp_type)))
887 BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
888 CQE_TYPE(cqe_fp_type));
891 if (CQE_TYPE_START(cqe_fp_type)) {
892 u16 queue = cqe_fp->queue_index;
893 DP(NETIF_MSG_RX_STATUS,
894 "calling tpa_start on queue %d\n",
897 bnx2x_tpa_start(fp, queue,
903 queue = cqe->end_agg_cqe.queue_index;
904 tpa_info = &fp->tpa_info[queue];
905 DP(NETIF_MSG_RX_STATUS,
906 "calling tpa_stop on queue %d\n",
909 frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
912 if (fp->mode == TPA_MODE_GRO)
913 pages = (frag_size + tpa_info->full_page - 1) /
916 pages = SGE_PAGE_ALIGN(frag_size) >>
919 bnx2x_tpa_stop(bp, fp, tpa_info, pages,
920 &cqe->end_agg_cqe, comp_ring_cons);
921 #ifdef BNX2X_STOP_ON_ERROR
926 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
930 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
931 pad = cqe_fp->placement_offset;
932 dma_sync_single_for_cpu(&bp->pdev->dev,
933 dma_unmap_addr(rx_buf, mapping),
934 pad + RX_COPY_THRESH,
937 prefetch(data + pad); /* speedup eth_type_trans() */
938 /* is this an error packet? */
939 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
940 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
941 "ERROR flags %x rx packet %u\n",
942 cqe_fp_flags, sw_comp_cons);
943 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
947 /* Since we don't have a jumbo ring
948 * copy small packets if mtu > 1500
950 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
951 (len <= RX_COPY_THRESH)) {
952 skb = netdev_alloc_skb_ip_align(bp->dev, len);
954 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
955 "ERROR packet dropped because of alloc failure\n");
956 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
959 memcpy(skb->data, data + pad, len);
960 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
962 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
964 dma_unmap_single(&bp->pdev->dev,
965 dma_unmap_addr(rx_buf, mapping),
968 skb = build_skb(data, fp->rx_frag_size);
969 if (unlikely(!skb)) {
970 bnx2x_frag_free(fp, data);
971 bnx2x_fp_qstats(bp, fp)->
972 rx_skb_alloc_failed++;
975 skb_reserve(skb, pad);
977 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
978 "ERROR packet dropped because of alloc failure\n");
979 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
981 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
987 skb->protocol = eth_type_trans(skb, bp->dev);
989 /* Set Toeplitz hash for a non-LRO skb */
990 skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
991 skb->l4_rxhash = l4_rxhash;
993 skb_checksum_none_assert(skb);
995 if (bp->dev->features & NETIF_F_RXCSUM)
996 bnx2x_csum_validate(skb, cqe, fp,
997 bnx2x_fp_qstats(bp, fp));
999 skb_record_rx_queue(skb, fp->rx_queue);
1001 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
1003 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1004 le16_to_cpu(cqe_fp->vlan_tag));
1006 skb_mark_napi_id(skb, &fp->napi);
1008 if (bnx2x_fp_ll_polling(fp))
1009 netif_receive_skb(skb);
1011 napi_gro_receive(&fp->napi, skb);
1013 rx_buf->data = NULL;
1015 bd_cons = NEXT_RX_IDX(bd_cons);
1016 bd_prod = NEXT_RX_IDX(bd_prod);
1017 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1020 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1021 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1023 /* mark CQE as free */
1024 BNX2X_SEED_CQE(cqe_fp);
1026 if (rx_pkt == budget)
1029 comp_ring_cons = RCQ_BD(sw_comp_cons);
1030 cqe = &fp->rx_comp_ring[comp_ring_cons];
1031 cqe_fp = &cqe->fast_path_cqe;
1034 fp->rx_bd_cons = bd_cons;
1035 fp->rx_bd_prod = bd_prod_fw;
1036 fp->rx_comp_cons = sw_comp_cons;
1037 fp->rx_comp_prod = sw_comp_prod;
1039 /* Update producers */
1040 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1043 fp->rx_pkt += rx_pkt;
1049 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1051 struct bnx2x_fastpath *fp = fp_cookie;
1052 struct bnx2x *bp = fp->bp;
1056 "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
1057 fp->index, fp->fw_sb_id, fp->igu_sb_id);
1059 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1061 #ifdef BNX2X_STOP_ON_ERROR
1062 if (unlikely(bp->panic))
1066 /* Handle Rx and Tx according to MSI-X vector */
1067 for_each_cos_in_tx_queue(fp, cos)
1068 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1070 prefetch(&fp->sb_running_index[SM_RX_ID]);
1071 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1076 /* HW Lock for shared dual port PHYs */
1077 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1079 mutex_lock(&bp->port.phy_mutex);
1081 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1084 void bnx2x_release_phy_lock(struct bnx2x *bp)
1086 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1088 mutex_unlock(&bp->port.phy_mutex);
1091 /* calculates MF speed according to current line speed and MF configuration */
1092 u16 bnx2x_get_mf_speed(struct bnx2x *bp)
1094 u16 line_speed = bp->link_vars.line_speed;
1096 u16 maxCfg = bnx2x_extract_max_cfg(bp,
1097 bp->mf_config[BP_VN(bp)]);
1099 /* Calculate the current MAX line speed limit for the MF devices
1103 line_speed = (line_speed * maxCfg) / 100;
1104 else { /* SD mode */
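/* in SD mode maxCfg is given in units of 100 Mbps */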
1105 u16 vn_max_rate = maxCfg * 100;
1107 if (vn_max_rate < line_speed)
1108 line_speed = vn_max_rate;
1116 * bnx2x_fill_report_data - fill link report data to report
1118 * @bp: driver handle
1119 * @data: link state to update
1121 * It uses non-atomic bit operations because it is called under the mutex.
1123 static void bnx2x_fill_report_data(struct bnx2x *bp,
1124 struct bnx2x_link_report_data *data)
1126 u16 line_speed = bnx2x_get_mf_speed(bp);
1128 memset(data, 0, sizeof(*data));
1130 /* Fill the report data: effective line speed */
1131 data->line_speed = line_speed;
1134 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1135 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1136 &data->link_report_flags);
1139 if (bp->link_vars.duplex == DUPLEX_FULL)
1140 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
1142 /* Rx Flow Control is ON */
1143 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1144 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
1146 /* Tx Flow Control is ON */
1147 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1148 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
1152 * bnx2x_link_report - report link status to OS.
1154 * @bp: driver handle
1156 * Calls __bnx2x_link_report() under the same locking scheme
1157 * as the link/PHY state managing code to ensure consistent link reporting.
1161 void bnx2x_link_report(struct bnx2x *bp)
1163 bnx2x_acquire_phy_lock(bp);
1164 __bnx2x_link_report(bp);
1165 bnx2x_release_phy_lock(bp);
1169 * __bnx2x_link_report - report link status to OS.
1171 * @bp: driver handle
1173 * Non-atomic implementation.
1174 * Should be called under the phy_lock.
1176 void __bnx2x_link_report(struct bnx2x *bp)
1178 struct bnx2x_link_report_data cur_data;
1181 if (IS_PF(bp) && !CHIP_IS_E1(bp))
1182 bnx2x_read_mf_cfg(bp);
1184 /* Read the current link report info */
1185 bnx2x_fill_report_data(bp, &cur_data);
1187 /* Don't report link down or exactly the same link status twice */
1188 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1189 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1190 &bp->last_reported_link.link_report_flags) &&
1191 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1192 &cur_data.link_report_flags)))
1197 /* We are going to report new link parameters now -
1198 * remember the current data for the next time.
1200 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1202 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1203 &cur_data.link_report_flags)) {
1204 netif_carrier_off(bp->dev);
1205 netdev_err(bp->dev, "NIC Link is Down\n");
1211 netif_carrier_on(bp->dev);
1213 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1214 &cur_data.link_report_flags))
1219 /* Handle the FC at the end so that only these flags could possibly
1220 * be set. This way we may easily check if there is no FC enabled.
1223 if (cur_data.link_report_flags) {
1224 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1225 &cur_data.link_report_flags)) {
1226 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1227 &cur_data.link_report_flags))
1228 flow = "ON - receive & transmit";
1230 flow = "ON - receive";
1232 flow = "ON - transmit";
1237 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1238 cur_data.line_speed, duplex, flow);
1242 static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1246 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1247 struct eth_rx_sge *sge;
1249 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
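/* the last two entries of each SGE page form the "next page" element,
 * chained to the following page (the last page wraps to the first)
 */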
1251 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1252 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1255 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1256 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1260 static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1261 struct bnx2x_fastpath *fp, int last)
1265 for (i = 0; i < last; i++) {
1266 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1267 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1268 u8 *data = first_buf->data;
1271 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1274 if (tpa_info->tpa_state == BNX2X_TPA_START)
1275 dma_unmap_single(&bp->pdev->dev,
1276 dma_unmap_addr(first_buf, mapping),
1277 fp->rx_buf_size, DMA_FROM_DEVICE);
1278 bnx2x_frag_free(fp, data);
1279 first_buf->data = NULL;
1283 void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1287 for_each_rx_queue_cnic(bp, j) {
1288 struct bnx2x_fastpath *fp = &bp->fp[j];
1292 /* Activate BD ring */
1294 * this will generate an interrupt (to the TSTORM);
1295 * it must only be done after the chip is initialized
1297 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1302 void bnx2x_init_rx_rings(struct bnx2x *bp)
1304 int func = BP_FUNC(bp);
1308 /* Allocate TPA resources */
1309 for_each_eth_queue(bp, j) {
1310 struct bnx2x_fastpath *fp = &bp->fp[j];
1313 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1315 if (!fp->disable_tpa) {
1316 /* Fill the per-aggregation pool */
1317 for (i = 0; i < MAX_AGG_QS(bp); i++) {
1318 struct bnx2x_agg_info *tpa_info =
1320 struct sw_rx_bd *first_buf =
1321 &tpa_info->first_buf;
1324 bnx2x_frag_alloc(fp, GFP_KERNEL);
1325 if (!first_buf->data) {
1326 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1328 bnx2x_free_tpa_pool(bp, fp, i);
1329 fp->disable_tpa = 1;
1332 dma_unmap_addr_set(first_buf, mapping, 0);
1333 tpa_info->tpa_state = BNX2X_TPA_STOP;
1336 /* "next page" elements initialization */
1337 bnx2x_set_next_page_sgl(fp);
1339 /* set SGEs bit mask */
1340 bnx2x_init_sge_ring_bit_mask(fp);
1342 /* Allocate SGEs and initialize the ring elements */
1343 for (i = 0, ring_prod = 0;
1344 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1346 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
1348 BNX2X_ERR("was only able to allocate %d rx sges\n",
1350 BNX2X_ERR("disabling TPA for queue[%d]\n",
1352 /* Cleanup already allocated elements */
1353 bnx2x_free_rx_sge_range(bp, fp,
1355 bnx2x_free_tpa_pool(bp, fp,
1357 fp->disable_tpa = 1;
1361 ring_prod = NEXT_SGE_IDX(ring_prod);
1364 fp->rx_sge_prod = ring_prod;
1368 for_each_eth_queue(bp, j) {
1369 struct bnx2x_fastpath *fp = &bp->fp[j];
1373 /* Activate BD ring */
1375 * this will generate an interrupt (to the TSTORM);
1376 * it must only be done after the chip is initialized
1378 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1384 if (CHIP_IS_E1(bp)) {
1385 REG_WR(bp, BAR_USTRORM_INTMEM +
1386 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1387 U64_LO(fp->rx_comp_mapping));
1388 REG_WR(bp, BAR_USTRORM_INTMEM +
1389 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1390 U64_HI(fp->rx_comp_mapping));
1395 static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1398 struct bnx2x *bp = fp->bp;
1400 for_each_cos_in_tx_queue(fp, cos) {
1401 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1402 unsigned pkts_compl = 0, bytes_compl = 0;
1404 u16 sw_prod = txdata->tx_pkt_prod;
1405 u16 sw_cons = txdata->tx_pkt_cons;
1407 while (sw_cons != sw_prod) {
1408 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1409 &pkts_compl, &bytes_compl);
1413 netdev_tx_reset_queue(
1414 netdev_get_tx_queue(bp->dev,
1415 txdata->txq_index));
1419 static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1423 for_each_tx_queue_cnic(bp, i) {
1424 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1428 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1432 for_each_eth_queue(bp, i) {
1433 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1437 static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1439 struct bnx2x *bp = fp->bp;
1442 /* ring wasn't allocated */
1443 if (fp->rx_buf_ring == NULL)
1446 for (i = 0; i < NUM_RX_BD; i++) {
1447 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1448 u8 *data = rx_buf->data;
1452 dma_unmap_single(&bp->pdev->dev,
1453 dma_unmap_addr(rx_buf, mapping),
1454 fp->rx_buf_size, DMA_FROM_DEVICE);
1456 rx_buf->data = NULL;
1457 bnx2x_frag_free(fp, data);
1461 static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1465 for_each_rx_queue_cnic(bp, j) {
1466 bnx2x_free_rx_bds(&bp->fp[j]);
1470 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1474 for_each_eth_queue(bp, j) {
1475 struct bnx2x_fastpath *fp = &bp->fp[j];
1477 bnx2x_free_rx_bds(fp);
1479 if (!fp->disable_tpa)
1480 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1484 void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1486 bnx2x_free_tx_skbs_cnic(bp);
1487 bnx2x_free_rx_skbs_cnic(bp);
1490 void bnx2x_free_skbs(struct bnx2x *bp)
1492 bnx2x_free_tx_skbs(bp);
1493 bnx2x_free_rx_skbs(bp);
1496 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1498 /* load old values */
1499 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1501 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1502 /* leave all but MAX value */
1503 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1505 /* set new MAX value */
1506 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1507 & FUNC_MF_CFG_MAX_BW_MASK;
1509 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1514 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1516 * @bp: driver handle
1517 * @nvecs: number of vectors to be released
1519 static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1523 if (nvecs == offset)
1526 /* VFs don't have a default SB */
1528 free_irq(bp->msix_table[offset].vector, bp->dev);
1529 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1530 bp->msix_table[offset].vector);
1534 if (CNIC_SUPPORT(bp)) {
1535 if (nvecs == offset)
1540 for_each_eth_queue(bp, i) {
1541 if (nvecs == offset)
1543 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1544 i, bp->msix_table[offset].vector);
1546 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1550 void bnx2x_free_irq(struct bnx2x *bp)
1552 if (bp->flags & USING_MSIX_FLAG &&
1553 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1554 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1556 /* vfs don't have a default status block */
1560 bnx2x_free_msix_irqs(bp, nvecs);
1562 free_irq(bp->dev->irq, bp->dev);
1566 int bnx2x_enable_msix(struct bnx2x *bp)
1568 int msix_vec = 0, i, rc;
1570 /* VFs don't have a default status block */
1572 bp->msix_table[msix_vec].entry = msix_vec;
1573 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1574 bp->msix_table[0].entry);
1578 /* CNIC requires an MSI-X vector for itself */
1579 if (CNIC_SUPPORT(bp)) {
1580 bp->msix_table[msix_vec].entry = msix_vec;
1581 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1582 msix_vec, bp->msix_table[msix_vec].entry);
1586 /* We need separate vectors for ETH queues only (not FCoE) */
1587 for_each_eth_queue(bp, i) {
1588 bp->msix_table[msix_vec].entry = msix_vec;
1589 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1590 msix_vec, msix_vec, i);
1594 DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1597 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], msix_vec);
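/* pci_enable_msix() returns 0 on success, a positive count of vectors
 * that could be allocated when the request was too large, or a
 * negative errno on failure.
 */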
1600 * reconfigure the number of tx/rx queues according to the available MSI-X vectors
1603 if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) {
1604 /* how many fewer vectors will we have? */
1605 int diff = msix_vec - rc;
1607 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1609 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1612 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1616 * decrease number of queues by number of unallocated entries
1618 bp->num_ethernet_queues -= diff;
1619 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1621 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1623 } else if (rc > 0) {
1624 /* Get by with single vector */
1625 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
1627 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1632 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1633 bp->flags |= USING_SINGLE_MSIX_FLAG;
1635 BNX2X_DEV_INFO("set number of queues to 1\n");
1636 bp->num_ethernet_queues = 1;
1637 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1638 } else if (rc < 0) {
1639 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1643 bp->flags |= USING_MSIX_FLAG;
1648 /* fall back to INTx if not enough memory */
1650 bp->flags |= DISABLE_MSI_FLAG;
1655 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1657 int i, rc, offset = 0;
1659 /* no default status block for vf */
1661 rc = request_irq(bp->msix_table[offset++].vector,
1662 bnx2x_msix_sp_int, 0,
1663 bp->dev->name, bp->dev);
1665 BNX2X_ERR("request sp irq failed\n");
1670 if (CNIC_SUPPORT(bp))
1673 for_each_eth_queue(bp, i) {
1674 struct bnx2x_fastpath *fp = &bp->fp[i];
1675 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1678 rc = request_irq(bp->msix_table[offset].vector,
1679 bnx2x_msix_fp_int, 0, fp->name, fp);
1681 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1682 bp->msix_table[offset].vector, rc);
1683 bnx2x_free_msix_irqs(bp, offset);
1690 i = BNX2X_NUM_ETH_QUEUES(bp);
1692 offset = 1 + CNIC_SUPPORT(bp);
1693 netdev_info(bp->dev,
1694 "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1695 bp->msix_table[0].vector,
1696 0, bp->msix_table[offset].vector,
1697 i - 1, bp->msix_table[offset + i - 1].vector);
1699 offset = CNIC_SUPPORT(bp);
1700 netdev_info(bp->dev,
1701 "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
1702 0, bp->msix_table[offset].vector,
1703 i - 1, bp->msix_table[offset + i - 1].vector);
1708 int bnx2x_enable_msi(struct bnx2x *bp)
1712 rc = pci_enable_msi(bp->pdev);
1714 BNX2X_DEV_INFO("MSI is not attainable\n");
1717 bp->flags |= USING_MSI_FLAG;
1722 static int bnx2x_req_irq(struct bnx2x *bp)
1724 unsigned long flags;
1727 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1730 flags = IRQF_SHARED;
1732 if (bp->flags & USING_MSIX_FLAG)
1733 irq = bp->msix_table[0].vector;
1735 irq = bp->pdev->irq;
1737 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1740 static int bnx2x_setup_irqs(struct bnx2x *bp)
1743 if (bp->flags & USING_MSIX_FLAG &&
1744 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1745 rc = bnx2x_req_msix_irqs(bp);
1749 rc = bnx2x_req_irq(bp);
1751 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1754 if (bp->flags & USING_MSI_FLAG) {
1755 bp->dev->irq = bp->pdev->irq;
1756 netdev_info(bp->dev, "using MSI IRQ %d\n",
1759 if (bp->flags & USING_MSIX_FLAG) {
1760 bp->dev->irq = bp->msix_table[0].vector;
1761 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1769 static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1773 for_each_rx_queue_cnic(bp, i) {
1774 bnx2x_fp_init_lock(&bp->fp[i]);
1775 napi_enable(&bnx2x_fp(bp, i, napi));
1779 static void bnx2x_napi_enable(struct bnx2x *bp)
1783 for_each_eth_queue(bp, i) {
1784 bnx2x_fp_init_lock(&bp->fp[i]);
1785 napi_enable(&bnx2x_fp(bp, i, napi));
1789 static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1794 for_each_rx_queue_cnic(bp, i) {
1795 napi_disable(&bnx2x_fp(bp, i, napi));
1796 while (!bnx2x_fp_lock_napi(&bp->fp[i]))
1802 static void bnx2x_napi_disable(struct bnx2x *bp)
1807 for_each_eth_queue(bp, i) {
1808 napi_disable(&bnx2x_fp(bp, i, napi));
1809 while (!bnx2x_fp_lock_napi(&bp->fp[i]))
1815 void bnx2x_netif_start(struct bnx2x *bp)
1817 if (netif_running(bp->dev)) {
1818 bnx2x_napi_enable(bp);
1819 if (CNIC_LOADED(bp))
1820 bnx2x_napi_enable_cnic(bp);
1821 bnx2x_int_enable(bp);
1822 if (bp->state == BNX2X_STATE_OPEN)
1823 netif_tx_wake_all_queues(bp->dev);
1827 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1829 bnx2x_int_disable_sync(bp, disable_hw);
1830 bnx2x_napi_disable(bp);
1831 if (CNIC_LOADED(bp))
1832 bnx2x_napi_disable_cnic(bp);
1835 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1837 struct bnx2x *bp = netdev_priv(dev);
1839 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
1840 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1841 u16 ether_type = ntohs(hdr->h_proto);
1843 /* Skip VLAN tag if present */
1844 if (ether_type == ETH_P_8021Q) {
1845 struct vlan_ethhdr *vhdr =
1846 (struct vlan_ethhdr *)skb->data;
1848 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1851 /* If ethertype is FCoE or FIP - use FCoE ring */
1852 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1853 return bnx2x_fcoe_tx(bp, txq_index);
1856 /* select a non-FCoE queue */
1857 return __netdev_pick_tx(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
1860 void bnx2x_set_num_queues(struct bnx2x *bp)
1863 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
1865 /* override in STORAGE SD modes */
1866 if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
1867 bp->num_ethernet_queues = 1;
1869 /* Add special queues */
1870 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1871 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1873 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
1877 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1879 * @bp: Driver handle
1881 * We currently support at most 16 Tx queues for each CoS, thus we will
1882 * allocate a multiple of 16 for ETH L2 rings according to the value of bp->max_cos.
1885 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1886 * index after all ETH L2 indices.
1888 * If the actual number of Tx queues (for each CoS) is less than 16 then there
1889 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1890 * 16..31,...) with indices that are not coupled with any real Tx queue.
1892 * The proper configuration of skb->queue_mapping is handled by
1893 * bnx2x_select_queue() and __skb_tx_hash().
1895 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1896 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1898 static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
1902 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
1903 rx = BNX2X_NUM_ETH_QUEUES(bp);
1905 /* account for fcoe queue */
1906 if (include_cnic && !NO_FCOE(bp)) {
1911 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1913 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1916 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1918 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1922 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
1928 static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1932 for_each_queue(bp, i) {
1933 struct bnx2x_fastpath *fp = &bp->fp[i];
1936 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1939 * Although no IP frames are expected to arrive on
1940 * this ring, we still want to add an
1941 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer overrun.
1944 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
1947 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
1948 IP_HEADER_ALIGNMENT_PADDING +
1951 BNX2X_FW_RX_ALIGN_END;
1952 /* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
1953 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
1954 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
1956 fp->rx_frag_size = 0;
1960 static int bnx2x_init_rss(struct bnx2x *bp)
1963 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1965 /* Prepare the initial contents of the indirection table if RSS is enabled
1968 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
1969 bp->rss_conf_obj.ind_table[i] =
1971 ethtool_rxfh_indir_default(i, num_eth_queues);
1974 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
1975 * per-port, so if explicit configuration is needed, do it only
1978 * For 57712 and newer, on the other hand, it's a per-function configuration.
1981 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
1984 int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
1985 bool config_hash, bool enable)
1987 struct bnx2x_config_rss_params params = {NULL};
1989 /* Although RSS is meaningless when there is a single HW queue we
1990 * still need it enabled in order to have HW Rx hash generated.
1992 * if (!is_eth_multi(bp))
1993 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
1996 params.rss_obj = rss_obj;
1998 __set_bit(RAMROD_COMP_WAIT, ¶ms.ramrod_flags);
2001 __set_bit(BNX2X_RSS_MODE_REGULAR, ¶ms.rss_flags);
2003 /* RSS configuration */
2004 __set_bit(BNX2X_RSS_IPV4, ¶ms.rss_flags);
2005 __set_bit(BNX2X_RSS_IPV4_TCP, ¶ms.rss_flags);
2006 __set_bit(BNX2X_RSS_IPV6, ¶ms.rss_flags);
2007 __set_bit(BNX2X_RSS_IPV6_TCP, ¶ms.rss_flags);
2008 if (rss_obj->udp_rss_v4)
2009 __set_bit(BNX2X_RSS_IPV4_UDP, ¶ms.rss_flags);
2010 if (rss_obj->udp_rss_v6)
2011 __set_bit(BNX2X_RSS_IPV6_UDP, ¶ms.rss_flags);
2013 __set_bit(BNX2X_RSS_MODE_DISABLED, ¶ms.rss_flags);
2017 params.rss_result_mask = MULTI_MASK;
2019 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
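/* RSS hash key: T_ETH_RSS_KEY 32-bit words (40 bytes), freshly
 * randomized below when config_hash is set
 */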
2023 prandom_bytes(params.rss_key, T_ETH_RSS_KEY * 4);
2024 __set_bit(BNX2X_RSS_SET_SRCH, ¶ms.rss_flags);
2028 return bnx2x_config_rss(bp, ¶ms);
2030 return bnx2x_vfpf_config_rss(bp, ¶ms);
2033 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
2035 struct bnx2x_func_state_params func_params = {NULL};
2037 /* Prepare parameters for function state transitions */
2038 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2040 func_params.f_obj = &bp->func_obj;
2041 func_params.cmd = BNX2X_F_CMD_HW_INIT;
2043 func_params.params.hw_init.load_phase = load_code;
2045 return bnx2x_func_state_change(bp, &func_params);
2049 * Cleans the objects that have internal lists without sending
2050 * ramrods. Should be run when interrupts are disabled.
2052 void bnx2x_squeeze_objects(struct bnx2x *bp)
2055 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
2056 struct bnx2x_mcast_ramrod_params rparam = {NULL};
2057 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
2059 /***************** Cleanup MACs' object first *************************/
2061 /* Wait for completion of the requested commands
2062 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2063 /* Perform a dry cleanup */
2064 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2066 /* Clean ETH primary MAC */
2067 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
2068 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
2071 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2073 /* Cleanup UC list */
2075 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2076 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2079 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2081 /***************** Now clean mcast object *****************************/
2082 rparam.mcast_obj = &bp->mcast_obj;
2083 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2085 /* Add a DEL command... - Since we're doing a driver cleanup only,
2086 * we take a lock surrounding both the initial send and the CONTs,
2087 * as we don't want a true completion to disrupt us in the middle.
2089 netif_addr_lock_bh(bp->dev);
2090 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2092 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2095 /* ...and wait until all pending commands are cleared */
2096 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2099 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2101 netif_addr_unlock_bh(bp->dev);
2105 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2107 netif_addr_unlock_bh(bp->dev);
2110 #ifndef BNX2X_STOP_ON_ERROR
2111 #define LOAD_ERROR_EXIT(bp, label) \
2113 (bp)->state = BNX2X_STATE_ERROR; \
2117 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2119 bp->cnic_loaded = false; \
2122 #else /*BNX2X_STOP_ON_ERROR*/
2123 #define LOAD_ERROR_EXIT(bp, label) \
2125 (bp)->state = BNX2X_STATE_ERROR; \
2129 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2131 bp->cnic_loaded = false; \
2135 #endif /*BNX2X_STOP_ON_ERROR*/
2137 static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2139 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2140 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2144 static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
2146 int num_groups, vf_headroom = 0;
2147 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
2149 /* number of queues for statistics is number of eth queues + FCoE */
2150 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
2152 /* Total number of FW statistics requests =
2153 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2154 * and fcoe l2 queue) stats + num of queues (which includes another 1
2155 * for fcoe l2 queue if applicable)
2157 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
2159 /* vf stats appear in the request list, but their data is allocated by
2160 * the VFs themselves. We don't include them in the bp->fw_stats_num as
2161 * it is used to determine where to place the vf stats queries in the request struct.
2165 vf_headroom = bnx2x_vf_headroom(bp);
2167 /* Request is built from stats_query_header and an array of
2168 * stats_query_cmd_group each of which contains
2169 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2170 * configured in the stats_query_header, so num_groups below is a ceiling division.
2173 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2174 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
2177 DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2178 bp->fw_stats_num, vf_headroom, num_groups);
2179 bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2180 num_groups * sizeof(struct stats_query_cmd_group);
2182 /* Data for statistics requests + stats_counter
2183 * stats_counter holds per-STORM counters that are incremented
2184 * when STORM has finished with the current request.
2185 * memory for FCoE offloaded statistics is counted anyway,
2186 * even if they will not be sent.
2187 * VF stats are not accounted for here as the data of VF stats is stored
2188 * in memory allocated by the VF, not here.
2190 bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2191 sizeof(struct per_pf_stats) +
2192 sizeof(struct fcoe_statistics_params) +
2193 sizeof(struct per_queue_stats) * num_queue_stats +
2194 sizeof(struct stats_counter);
2196 BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
2197 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2200 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2201 bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2202 bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2203 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2204 bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2205 bp->fw_stats_req_sz;
2207 DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
2208 U64_HI(bp->fw_stats_req_mapping),
2209 U64_LO(bp->fw_stats_req_mapping));
2210 DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
2211 U64_HI(bp->fw_stats_data_mapping),
2212 U64_LO(bp->fw_stats_data_mapping));
2216 bnx2x_free_fw_stats_mem(bp);
2217 BNX2X_ERR("Can't allocate FW stats memory\n");
2221 /* send load request to mcp and analyze response */
2222 static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2228 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2229 DRV_MSG_SEQ_NUMBER_MASK);
2230 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2232 /* Get current FW pulse sequence */
2233 bp->fw_drv_pulse_wr_seq =
2234 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2235 DRV_PULSE_SEQ_MASK);
2236 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2238 param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2240 if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2241 param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2244 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
2246 /* if mcp fails to respond we must abort */
2247 if (!(*load_code)) {
2248 BNX2X_ERR("MCP response failure, aborting\n");
2252 /* If mcp refused (e.g. other port is in diagnostic mode), we must abort
2255 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2256 BNX2X_ERR("MCP refused load request, aborting\n");
2262 /* check whether another PF has already loaded FW to chip. In
2263 * virtualized environments a pf from another VM may have already
2264 * initialized the device including loading FW
2266 int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code)
2268 /* is another pf loaded on this engine? */
2269 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2270 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2271 /* build my FW version dword */
2272 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2273 (BCM_5710_FW_MINOR_VERSION << 8) +
2274 (BCM_5710_FW_REVISION_VERSION << 16) +
2275 (BCM_5710_FW_ENGINEERING_VERSION << 24);
2277 /* read loaded FW from chip */
2278 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2280 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2283 /* abort nic load if version mismatch */
2284 if (my_fw != loaded_fw) {
2285 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
2293 /* returns the "mcp load_code" according to global load_count array */
2294 static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2296 int path = BP_PATH(bp);
2298 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
2299 path, load_count[path][0], load_count[path][1],
2300 load_count[path][2]);
2301 load_count[path][0]++;
2302 load_count[path][1 + port]++;
2303 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
2304 path, load_count[path][0], load_count[path][1],
2305 load_count[path][2]);
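/* Derive the load type from the counters just updated: the first function
 * on the path gets LOAD_COMMON, the first on the port gets LOAD_PORT and
 * every other function gets LOAD_FUNCTION.
 */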
2306 if (load_count[path][0] == 1)
2307 return FW_MSG_CODE_DRV_LOAD_COMMON;
2308 else if (load_count[path][1 + port] == 1)
2309 return FW_MSG_CODE_DRV_LOAD_PORT;
2311 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2314 /* mark PMF if applicable */
2315 static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2317 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2318 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2319 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2321 /* We need the barrier to ensure the ordering between the
2322 * writing to bp->port.pmf here and reading it from the
2323 * bnx2x_periodic_task().
2330 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2333 static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2335 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2336 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2337 (bp->common.shmem2_base)) {
2338 if (SHMEM2_HAS(bp, dcc_support))
2339 SHMEM2_WR(bp, dcc_support,
2340 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2341 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2342 if (SHMEM2_HAS(bp, afex_driver_support))
2343 SHMEM2_WR(bp, afex_driver_support,
2344 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2347 /* Set AFEX default VLAN tag to an invalid value */
2348 bp->afex_def_vlan_tag = -1;
2352 * bnx2x_bz_fp - zero content of the fastpath structure.
2354 * @bp: driver handle
2355 * @index: fastpath index to be zeroed
2357 * Makes sure the contents of the bp->fp[index].napi are kept intact.
2360 static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2362 struct bnx2x_fastpath *fp = &bp->fp[index];
2364 struct napi_struct orig_napi = fp->napi;
2365 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
2367 /* bzero bnx2x_fastpath contents */
2369 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2370 sizeof(struct bnx2x_agg_info));
2371 memset(fp, 0, sizeof(*fp));
2373 /* Restore the NAPI object as it has been already initialized */
2374 fp->napi = orig_napi;
2375 fp->tpa_info = orig_tpa_info;
2379 fp->max_cos = bp->max_cos;
2381 /* Special queues support only one CoS */
2384 /* Init txdata pointers */
2386 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
2388 for_each_cos_in_tx_queue(fp, cos)
2389 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2390 BNX2X_NUM_ETH_QUEUES(bp) + index];
2392 /* set the tpa flag for each queue. The tpa flag determines the queue
2393 * minimal size so it must be set prior to queue memory allocation
2395 fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
2396 (bp->flags & GRO_ENABLE_FLAG &&
2397 bnx2x_mtu_allows_gro(bp->dev->mtu)));
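/* When both the TPA and GRO flags are set, LRO wins: the checks below leave
 * the queue in TPA_MODE_LRO.
 */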
2398 if (bp->flags & TPA_ENABLE_FLAG)
2399 fp->mode = TPA_MODE_LRO;
2400 else if (bp->flags & GRO_ENABLE_FLAG)
2401 fp->mode = TPA_MODE_GRO;
2403 /* We don't want TPA on an FCoE L2 ring */
2405 fp->disable_tpa = 1;
2408 int bnx2x_load_cnic(struct bnx2x *bp)
2410 int i, rc, port = BP_PORT(bp);
2412 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2414 mutex_init(&bp->cnic_mutex);
2417 rc = bnx2x_alloc_mem_cnic(bp);
2419 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2420 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2424 rc = bnx2x_alloc_fp_mem_cnic(bp);
2426 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2427 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2430 /* Update the number of queues with the cnic queues */
2431 rc = bnx2x_set_real_num_queues(bp, 1);
2433 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2434 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2437 /* Add all CNIC NAPI objects */
2438 bnx2x_add_all_napi_cnic(bp);
2439 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2440 bnx2x_napi_enable_cnic(bp);
2442 rc = bnx2x_init_hw_func_cnic(bp);
2444 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2446 bnx2x_nic_init_cnic(bp);
2449 /* Enable Timer scan */
2450 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2452 /* setup cnic queues */
2453 for_each_cnic_queue(bp, i) {
2454 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2456 BNX2X_ERR("Queue setup failed\n");
2457 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2462 /* Initialize Rx filter. */
2463 bnx2x_set_rx_mode_inner(bp);
2465 /* re-read iscsi info */
2466 bnx2x_get_iscsi_info(bp);
2467 bnx2x_setup_cnic_irq_info(bp);
2468 bnx2x_setup_cnic_info(bp);
2469 bp->cnic_loaded = true;
2470 if (bp->state == BNX2X_STATE_OPEN)
2471 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2473 DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2477 #ifndef BNX2X_STOP_ON_ERROR
2479 /* Disable Timer scan */
2480 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2483 bnx2x_napi_disable_cnic(bp);
2484 /* Update the number of queues without the cnic queues */
2485 if (bnx2x_set_real_num_queues(bp, 0))
2486 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2488 BNX2X_ERR("CNIC-related load failed\n");
2489 bnx2x_free_fp_mem_cnic(bp);
2490 bnx2x_free_mem_cnic(bp);
2492 #endif /* ! BNX2X_STOP_ON_ERROR */
2495 /* must be called with rtnl_lock */
2496 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2498 int port = BP_PORT(bp);
2499 int i, rc = 0, load_code = 0;
2501 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2503 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2505 #ifdef BNX2X_STOP_ON_ERROR
2506 if (unlikely(bp->panic)) {
2507 BNX2X_ERR("Can't load NIC when there is panic\n");
2512 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2514 /* zero the structure w/o any lock, before SP handler is initialized */
2515 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2516 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2517 &bp->last_reported_link.link_report_flags);
2520 /* must be called before memory allocation and HW init */
2521 bnx2x_ilt_set_info(bp);
2524 * Zero fastpath structures preserving invariants like napi, which are
2525 * allocated only once, fp index, max_cos, bp pointer.
2526 * Also set fp->disable_tpa and txdata_ptr.
2528 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
2529 for_each_queue(bp, i)
2531 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2532 bp->num_cnic_queues) *
2533 sizeof(struct bnx2x_fp_txdata));
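/* The txdata array is sized for the worst case - every possible RSS queue
 * times every CoS plus the CNIC queues - so the whole array is cleared here
 * regardless of how many queues end up being used.
 */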
2535 bp->fcoe_init = false;
2537 /* Set the receive queues buffer size */
2538 bnx2x_set_rx_buf_size(bp);
2541 rc = bnx2x_alloc_mem(bp);
2543 BNX2X_ERR("Unable to allocate bp memory\n");
2548 /* This needs to be done after alloc mem, since it self-adjusts to the
2549 * amount of memory available for RSS queues
2551 rc = bnx2x_alloc_fp_mem(bp);
2553 BNX2X_ERR("Unable to allocate memory for fps\n");
2554 LOAD_ERROR_EXIT(bp, load_error0);
2557 /* Allocate memory for FW statistics */
2558 if (bnx2x_alloc_fw_stats_mem(bp))
2559 LOAD_ERROR_EXIT(bp, load_error0);
2561 /* request pf to initialize status blocks */
2563 rc = bnx2x_vfpf_init(bp);
2565 LOAD_ERROR_EXIT(bp, load_error0);
2568 /* Since bnx2x_alloc_mem() may update bp->num_queues,
2569 * bnx2x_set_real_num_queues() must always come after it.
2570 * At this stage cnic queues are not counted.
2572 rc = bnx2x_set_real_num_queues(bp, 0);
2574 BNX2X_ERR("Unable to set real_num_queues\n");
2575 LOAD_ERROR_EXIT(bp, load_error0);
2578 /* Configure multi-CoS mappings in the kernel.
2579 * This configuration may be overridden by a multi-class queue
2580 * discipline or by a DCBX negotiation result.
2582 bnx2x_setup_tc(bp->dev, bp->max_cos);
2584 /* Add all NAPI objects */
2585 bnx2x_add_all_napi(bp);
2586 DP(NETIF_MSG_IFUP, "napi added\n");
2587 bnx2x_napi_enable(bp);
2590 /* set pf load just before approaching the MCP */
2591 bnx2x_set_pf_load(bp);
2593 /* if mcp exists send load request and analyze response */
2594 if (!BP_NOMCP(bp)) {
2595 /* attempt to load pf */
2596 rc = bnx2x_nic_load_request(bp, &load_code);
2598 LOAD_ERROR_EXIT(bp, load_error1);
2600 /* what did mcp say? */
2601 rc = bnx2x_nic_load_analyze_req(bp, load_code);
2603 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2604 LOAD_ERROR_EXIT(bp, load_error2);
2607 load_code = bnx2x_nic_load_no_mcp(bp, port);
2610 /* mark pmf if applicable */
2611 bnx2x_nic_load_pmf(bp, load_code);
2613 /* Init Function state controlling object */
2614 bnx2x__init_func_obj(bp);
2617 rc = bnx2x_init_hw(bp, load_code);
2619 BNX2X_ERR("HW init failed, aborting\n");
2620 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2621 LOAD_ERROR_EXIT(bp, load_error2);
2625 bnx2x_pre_irq_nic_init(bp);
2627 /* Connect to IRQs */
2628 rc = bnx2x_setup_irqs(bp);
2630 BNX2X_ERR("setup irqs failed\n");
2632 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2633 LOAD_ERROR_EXIT(bp, load_error2);
2636 /* Init per-function objects */
2638 /* Setup NIC internals and enable interrupts */
2639 bnx2x_post_irq_nic_init(bp, load_code);
2641 bnx2x_init_bp_objs(bp);
2642 bnx2x_iov_nic_init(bp);
2644 /* Set AFEX default VLAN tag to an invalid value */
2645 bp->afex_def_vlan_tag = -1;
2646 bnx2x_nic_load_afex_dcc(bp, load_code);
2647 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2648 rc = bnx2x_func_start(bp);
2650 BNX2X_ERR("Function start failed!\n");
2651 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2653 LOAD_ERROR_EXIT(bp, load_error3);
2656 /* Send LOAD_DONE command to MCP */
2657 if (!BP_NOMCP(bp)) {
2658 load_code = bnx2x_fw_command(bp,
2659 DRV_MSG_CODE_LOAD_DONE, 0);
2661 BNX2X_ERR("MCP response failure, aborting\n");
2663 LOAD_ERROR_EXIT(bp, load_error3);
2667 /* initialize FW coalescing state machines in RAM */
2668 bnx2x_update_coalesce(bp);
2671 /* setup the leading queue */
2672 rc = bnx2x_setup_leading(bp);
2674 BNX2X_ERR("Setup leading failed!\n");
2675 LOAD_ERROR_EXIT(bp, load_error3);
2678 /* set up the rest of the queues */
2679 for_each_nondefault_eth_queue(bp, i) {
2681 rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
2683 rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
2685 BNX2X_ERR("Queue %d setup failed\n", i);
2686 LOAD_ERROR_EXIT(bp, load_error3);
2691 rc = bnx2x_init_rss(bp);
2693 BNX2X_ERR("PF RSS init failed\n");
2694 LOAD_ERROR_EXIT(bp, load_error3);
2697 /* Now that clients are configured we are ready to work */
2698 bp->state = BNX2X_STATE_OPEN;
2700 /* Configure a ucast MAC */
2702 rc = bnx2x_set_eth_mac(bp, true);
2704 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2707 BNX2X_ERR("Setting Ethernet MAC failed\n");
2708 LOAD_ERROR_EXIT(bp, load_error3);
2711 if (IS_PF(bp) && bp->pending_max) {
2712 bnx2x_update_max_mf_config(bp, bp->pending_max);
2713 bp->pending_max = 0;
2717 rc = bnx2x_initial_phy_init(bp, load_mode);
2719 LOAD_ERROR_EXIT(bp, load_error3);
2721 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
2723 /* Start fast path */
2725 /* Initialize Rx filter. */
2726 bnx2x_set_rx_mode_inner(bp);
2729 switch (load_mode) {
2731 /* Tx queues should only be re-enabled */
2732 netif_tx_wake_all_queues(bp->dev);
2736 netif_tx_start_all_queues(bp->dev);
2737 smp_mb__after_clear_bit();
2741 case LOAD_LOOPBACK_EXT:
2742 bp->state = BNX2X_STATE_DIAG;
2750 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
2752 bnx2x__link_status_update(bp);
2754 /* start the timer */
2755 mod_timer(&bp->timer, jiffies + bp->current_interval);
2757 if (CNIC_ENABLED(bp))
2758 bnx2x_load_cnic(bp);
2760 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2761 /* mark driver is loaded in shmem2 */
2763 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2764 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2765 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2766 DRV_FLAGS_CAPABILITIES_LOADED_L2);
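/* The matching clear of DRV_FLAGS_CAPABILITIES_LOADED_L2 is done at the
 * beginning of bnx2x_nic_unload().
 */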
2769 /* Wait for all pending SP commands to complete */
2770 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2771 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2772 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
2776 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2777 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2778 bnx2x_dcbx_init(bp, false);
2780 DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2784 #ifndef BNX2X_STOP_ON_ERROR
2787 bnx2x_int_disable_sync(bp, 1);
2789 /* Clean queueable objects */
2790 bnx2x_squeeze_objects(bp);
2793 /* Free SKBs, SGEs, TPA pool and driver internals */
2794 bnx2x_free_skbs(bp);
2795 for_each_rx_queue(bp, i)
2796 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2801 if (IS_PF(bp) && !BP_NOMCP(bp)) {
2802 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2803 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2808 bnx2x_napi_disable(bp);
2809 bnx2x_del_all_napi(bp);
2811 /* clear pf_load status, as it was already set */
2813 bnx2x_clear_pf_load(bp);
2815 bnx2x_free_fw_stats_mem(bp);
2816 bnx2x_free_fp_mem(bp);
2820 #endif /* ! BNX2X_STOP_ON_ERROR */
2823 int bnx2x_drain_tx_queues(struct bnx2x *bp)
2827 /* Wait until tx fastpath tasks complete */
2828 for_each_tx_queue(bp, i) {
2829 struct bnx2x_fastpath *fp = &bp->fp[i];
2831 for_each_cos_in_tx_queue(fp, cos)
2832 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2839 /* must be called with rtnl_lock */
2840 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2843 bool global = false;
2845 DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2847 /* mark driver is unloaded in shmem2 */
2848 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2850 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2851 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2852 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2855 if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
2856 (bp->state == BNX2X_STATE_CLOSED ||
2857 bp->state == BNX2X_STATE_ERROR)) {
2858 /* We can get here if the driver has been unloaded
2859 * during parity error recovery and is either waiting for a
2860 * leader to complete or for other functions to unload and
2861 * then ifdown has been issued. In this case we want to
2862 * unload and let other functions complete the recovery
2865 bp->recovery_state = BNX2X_RECOVERY_DONE;
2867 bnx2x_release_leader_lock(bp);
2870 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2871 BNX2X_ERR("Can't unload in closed or error state\n");
2875 /* Nothing to do during unload if previous bnx2x_nic_load()
2876 * has not completed successfully - all resources are released.
2878 * We can get here only after an unsuccessful ndo_* callback, during which
2879 * dev->IFF_UP flag is still on.
2881 if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
2884 /* It's important to set the bp->state to the value different from
2885 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2886 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2888 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2891 /* indicate to VFs that the PF is going down */
2892 bnx2x_iov_channel_down(bp);
2894 if (CNIC_LOADED(bp))
2895 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2898 bnx2x_tx_disable(bp);
2899 netdev_reset_tc(bp->dev);
2901 bp->rx_mode = BNX2X_RX_MODE_NONE;
2903 del_timer_sync(&bp->timer);
2906 /* Set ALWAYS_ALIVE bit in shmem */
2907 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2908 bnx2x_drv_pulse(bp);
2909 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2910 bnx2x_save_statistics(bp);
2913 /* wait till consumers catch up with producers in all queues */
2914 bnx2x_drain_tx_queues(bp);
2916 /* if VF, indicate to the PF that this function is going down (the PF will
2917 * delete sp elements and clear initializations)
2920 bnx2x_vfpf_close_vf(bp);
2921 else if (unload_mode != UNLOAD_RECOVERY)
2922 /* if this is a normal/close unload need to clean up chip*/
2923 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
2925 /* Send the UNLOAD_REQUEST to the MCP */
2926 bnx2x_send_unload_req(bp, unload_mode);
2928 /* Prevent transactions to the host from the functions on the
2929 * engine that doesn't reset global blocks in case of global
2930 * attention, once global blocks are reset and gates are opened
2931 * (the engine whose leader will perform the recovery last)
2934 if (!CHIP_IS_E1x(bp))
2935 bnx2x_pf_disable(bp);
2937 /* Disable HW interrupts, NAPI */
2938 bnx2x_netif_stop(bp, 1);
2939 /* Delete all NAPI objects */
2940 bnx2x_del_all_napi(bp);
2941 if (CNIC_LOADED(bp))
2942 bnx2x_del_all_napi_cnic(bp);
2946 /* Report UNLOAD_DONE to MCP */
2947 bnx2x_send_unload_done(bp, false);
2951 * At this stage no more interrupts will arrive so we may safely clean
2952 * the queueable objects here in case they failed to get cleaned so far.
2955 bnx2x_squeeze_objects(bp);
2957 /* There should be no more pending SP commands at this stage */
2962 /* clear pending work in rtnl task */
2963 bp->sp_rtnl_state = 0;
2966 /* Free SKBs, SGEs, TPA pool and driver internals */
2967 bnx2x_free_skbs(bp);
2968 if (CNIC_LOADED(bp))
2969 bnx2x_free_skbs_cnic(bp);
2970 for_each_rx_queue(bp, i)
2971 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2973 bnx2x_free_fp_mem(bp);
2974 if (CNIC_LOADED(bp))
2975 bnx2x_free_fp_mem_cnic(bp);
2978 if (CNIC_LOADED(bp))
2979 bnx2x_free_mem_cnic(bp);
2983 bp->state = BNX2X_STATE_CLOSED;
2984 bp->cnic_loaded = false;
2986 /* Check if there are pending parity attentions. If there are - set
2987 * RECOVERY_IN_PROGRESS.
2989 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
2990 bnx2x_set_reset_in_progress(bp);
2992 /* Set RESET_IS_GLOBAL if needed */
2994 bnx2x_set_reset_global(bp);
2997 /* The last driver must disable a "close the gate" if there is no
2998 * parity attention or "process kill" pending.
3001 !bnx2x_clear_pf_load(bp) &&
3002 bnx2x_reset_is_done(bp, BP_PATH(bp)))
3003 bnx2x_disable_close_the_gate(bp);
3005 DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
3010 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
3014 /* If there is no power capability, silently succeed */
3015 if (!bp->pdev->pm_cap) {
3016 BNX2X_DEV_INFO("No power capability. Breaking.\n");
3020 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
3024 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3025 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3026 PCI_PM_CTRL_PME_STATUS));
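/* PME_STATUS is write-one-to-clear, so the write above clears any pending
 * PME event while the zeroed state field selects D0.
 */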
3028 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3029 /* delay required during transition out of D3hot */
3034 /* If there are other clients above, don't
3035 * shut down the power */
3036 if (atomic_read(&bp->pdev->enable_cnt) != 1)
3038 /* Don't shut down the power for emulation and FPGA */
3039 if (CHIP_REV_IS_SLOW(bp))
3042 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3046 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3048 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3051 /* No more memory access after this point until
3052 * device is brought back to D0.
3057 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
3064 * net_device service functions
3066 int bnx2x_poll(struct napi_struct *napi, int budget)
3070 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3072 struct bnx2x *bp = fp->bp;
3075 #ifdef BNX2X_STOP_ON_ERROR
3076 if (unlikely(bp->panic)) {
3077 napi_complete(napi);
3081 if (!bnx2x_fp_lock_napi(fp))
3084 for_each_cos_in_tx_queue(fp, cos)
3085 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3086 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
3088 if (bnx2x_has_rx_work(fp)) {
3089 work_done += bnx2x_rx_int(fp, budget - work_done);
3091 /* must not complete if we consumed full budget */
3092 if (work_done >= budget) {
3093 bnx2x_fp_unlock_napi(fp);
3098 /* Fall out from the NAPI loop if needed */
3099 if (!bnx2x_fp_unlock_napi(fp) &&
3100 !(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3102 /* No need to update SB for FCoE L2 ring as long as
3103 * it's connected to the default SB and the SB
3104 * has been updated when NAPI was scheduled.
3106 if (IS_FCOE_FP(fp)) {
3107 napi_complete(napi);
3110 bnx2x_update_fpsb_idx(fp);
3111 /* bnx2x_has_rx_work() reads the status block,
3112 * thus we need to ensure that status block indices
3113 * have been actually read (bnx2x_update_fpsb_idx)
3114 * prior to this check (bnx2x_has_rx_work) so that
3115 * we won't write the "newer" value of the status block
3116 * to IGU (if there was a DMA right after
3117 * bnx2x_has_rx_work and if there is no rmb, the memory
3118 * reading (bnx2x_update_fpsb_idx) may be postponed
3119 * to right before bnx2x_ack_sb). In this case there
3120 * will never be another interrupt until there is
3121 * another update of the status block, while there
3122 * is still unhandled work.
3126 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3127 napi_complete(napi);
3128 /* Re-enable interrupts */
3129 DP(NETIF_MSG_RX_STATUS,
3130 "Update index to %d\n", fp->fp_hc_idx);
3131 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3132 le16_to_cpu(fp->fp_hc_idx),
3142 #ifdef CONFIG_NET_RX_BUSY_POLL
3143 /* must be called with local_bh_disable()d */
3144 int bnx2x_low_latency_recv(struct napi_struct *napi)
3146 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3148 struct bnx2x *bp = fp->bp;
3151 if ((bp->state == BNX2X_STATE_CLOSED) ||
3152 (bp->state == BNX2X_STATE_ERROR) ||
3153 (bp->flags & (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG)))
3154 return LL_FLUSH_FAILED;
3156 if (!bnx2x_fp_lock_poll(fp))
3157 return LL_FLUSH_BUSY;
3159 if (bnx2x_has_rx_work(fp))
3160 found = bnx2x_rx_int(fp, 4);
3162 bnx2x_fp_unlock_poll(fp);
3168 /* We split the first BD into header and data BDs
3169 * to ease the pain of our fellow microcode engineers;
3170 * we use one mapping for both BDs
3172 static u16 bnx2x_tx_split(struct bnx2x *bp,
3173 struct bnx2x_fp_txdata *txdata,
3174 struct sw_tx_bd *tx_buf,
3175 struct eth_tx_start_bd **tx_bd, u16 hlen,
3178 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3179 struct eth_tx_bd *d_tx_bd;
3181 int old_len = le16_to_cpu(h_tx_bd->nbytes);
3183 /* first fix first BD */
3184 h_tx_bd->nbytes = cpu_to_le16(hlen);
3186 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3187 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
3189 /* now get a new data BD
3190 * (after the pbd) and fill it */
3191 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3192 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3194 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3195 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3197 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3198 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3199 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3201 /* this marks the BD as one that has no individual mapping */
3202 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
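/* Both BDs reference the same DMA mapping (the data BD simply starts hlen
 * bytes into it), so only the first BD needs to be unmapped on completion.
 */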
3204 DP(NETIF_MSG_TX_QUEUED,
3205 "TSO split data size is %d (%x:%x)\n",
3206 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3209 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3214 #define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3215 #define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
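/* Repair a checksum whose summation started 'fix' bytes away from the
 * transport header: a positive fix subtracts the checksum of the bytes
 * preceding the header, a negative fix adds the checksum of the bytes
 * that follow it, and the result is folded and byte-swapped for the BD.
 */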
3216 static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
3218 __sum16 tsum = (__force __sum16) csum;
3221 tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3222 csum_partial(t_header - fix, fix, 0)));
3225 tsum = ~csum_fold(csum_add((__force __wsum) csum,
3226 csum_partial(t_header, -fix, 0)));
3228 return bswab16(tsum);
3231 static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
3237 if (skb->ip_summed != CHECKSUM_PARTIAL)
3240 protocol = vlan_get_protocol(skb);
3241 if (protocol == htons(ETH_P_IPV6)) {
3243 prot = ipv6_hdr(skb)->nexthdr;
3246 prot = ip_hdr(skb)->protocol;
3249 if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3250 if (inner_ip_hdr(skb)->version == 6) {
3251 rc |= XMIT_CSUM_ENC_V6;
3252 if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3253 rc |= XMIT_CSUM_TCP;
3255 rc |= XMIT_CSUM_ENC_V4;
3256 if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
3257 rc |= XMIT_CSUM_TCP;
3260 if (prot == IPPROTO_TCP)
3261 rc |= XMIT_CSUM_TCP;
3263 if (skb_is_gso(skb)) {
3264 if (skb_is_gso_v6(skb)) {
3265 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
3266 if (rc & XMIT_CSUM_ENC)
3267 rc |= XMIT_GSO_ENC_V6;
3269 rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
3270 if (rc & XMIT_CSUM_ENC)
3271 rc |= XMIT_GSO_ENC_V4;
3278 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3279 /* check if packet requires linearization (packet is too fragmented).
3280 No need to check fragmentation if page size > 8K (there will be no
3281 violation of FW restrictions) */
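/* The window check below slides a window of (MAX_FETCH_BD - 3) consecutive
 * frags across the skb and requires every window to carry at least one MSS
 * worth of data; if any window falls short, the skb must be linearized.
 */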
3282 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3287 int first_bd_sz = 0;
3289 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3290 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
3292 if (xmit_type & XMIT_GSO) {
3293 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3294 /* Check if LSO packet needs to be copied:
3295 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
3296 int wnd_size = MAX_FETCH_BD - 3;
3297 /* Number of windows to check */
3298 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3303 /* Headers length */
3304 hlen = (int)(skb_transport_header(skb) - skb->data) +
3307 /* Amount of data (w/o headers) on linear part of SKB*/
3308 first_bd_sz = skb_headlen(skb) - hlen;
3310 wnd_sum = first_bd_sz;
3312 /* Calculate the first sum - it's special */
3313 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3315 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
3317 /* If there was data in the linear part of the skb - check it */
3318 if (first_bd_sz > 0) {
3319 if (unlikely(wnd_sum < lso_mss)) {
3324 wnd_sum -= first_bd_sz;
3327 /* Others are easier: run through the frag list and
3328 check all windows */
3329 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3331 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
3333 if (unlikely(wnd_sum < lso_mss)) {
3338 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
3341 /* in the non-LSO case a too fragmented packet should always be linearized */
3348 if (unlikely(to_copy))
3349 DP(NETIF_MSG_TX_QUEUED,
3350 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
3351 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3352 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3358 static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
3361 struct ipv6hdr *ipv6;
3363 *parsing_data |= (skb_shinfo(skb)->gso_size <<
3364 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
3365 ETH_TX_PARSE_BD_E2_LSO_MSS;
3367 if (xmit_type & XMIT_GSO_ENC_V6)
3368 ipv6 = inner_ipv6_hdr(skb);
3369 else if (xmit_type & XMIT_GSO_V6)
3370 ipv6 = ipv6_hdr(skb);
3374 if (ipv6 && ipv6->nexthdr == NEXTHDR_IPV6)
3375 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
3379 * bnx2x_set_pbd_gso - update PBD in GSO case.
3383 * @xmit_type: xmit flags
3385 static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3386 struct eth_tx_parse_bd_e1x *pbd,
3387 struct eth_tx_start_bd *tx_start_bd,
3390 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
3391 pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
3392 pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
3394 if (xmit_type & XMIT_GSO_V4) {
3395 pbd->ip_id = bswab16(ip_hdr(skb)->id);
3396 pbd->tcp_pseudo_csum =
3397 bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3399 0, IPPROTO_TCP, 0));
3401 /* GSO on 57710/57711 needs FW to calculate IP checksum */
3402 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
3404 pbd->tcp_pseudo_csum =
3405 bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3406 &ipv6_hdr(skb)->daddr,
3407 0, IPPROTO_TCP, 0));
3411 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
3415 * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3417 * @bp: driver handle
3419 * @parsing_data: data to be updated
3420 * @xmit_type: xmit flags
3422 * 57712/578xx related, when skb has encapsulation
3424 static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3425 u32 *parsing_data, u32 xmit_type)
3428 ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3429 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3430 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3432 if (xmit_type & XMIT_CSUM_TCP) {
3433 *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3434 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3435 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3437 return skb_inner_transport_header(skb) +
3438 inner_tcp_hdrlen(skb) - skb->data;
3441 /* We support checksum offload for TCP and UDP only.
3442 * No need to pass the UDP header length - it's a constant.
3444 return skb_inner_transport_header(skb) +
3445 sizeof(struct udphdr) - skb->data;
3449 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
3451 * @bp: driver handle
3453 * @parsing_data: data to be updated
3454 * @xmit_type: xmit flags
3456 * 57712/578xx related
3458 static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3459 u32 *parsing_data, u32 xmit_type)
3462 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
3463 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3464 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3466 if (xmit_type & XMIT_CSUM_TCP) {
3467 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3468 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3469 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3471 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
3473 /* We support checksum offload for TCP and UDP only.
3474 * No need to pass the UDP header length - it's a constant.
3476 return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
3479 /* set FW indication according to inner or outer protocols if tunneled */
3480 static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3481 struct eth_tx_start_bd *tx_start_bd,
3484 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3486 if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
3487 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
3489 if (!(xmit_type & XMIT_CSUM_TCP))
3490 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
3494 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
3496 * @bp: driver handle
3498 * @pbd: parse BD to be updated
3499 * @xmit_type: xmit flags
3501 static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3502 struct eth_tx_parse_bd_e1x *pbd,
3505 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
3507 /* for now NS flag is not used in Linux */
3510 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3511 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
3513 pbd->ip_hlen_w = (skb_transport_header(skb) -
3514 skb_network_header(skb)) >> 1;
3516 hlen += pbd->ip_hlen_w;
3518 /* We support checksum offload for TCP and UDP only */
3519 if (xmit_type & XMIT_CSUM_TCP)
3520 hlen += tcp_hdrlen(skb) / 2;
3522 hlen += sizeof(struct udphdr) / 2;
3524 pbd->total_hlen_w = cpu_to_le16(hlen);
3527 if (xmit_type & XMIT_CSUM_TCP) {
3528 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
3531 s8 fix = SKB_CS_OFF(skb); /* signed! */
3533 DP(NETIF_MSG_TX_QUEUED,
3534 "hlen %d fix %d csum before fix %x\n",
3535 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3537 /* HW bug: fixup the CSUM */
3538 pbd->tcp_pseudo_csum =
3539 bnx2x_csum_fix(skb_transport_header(skb),
3542 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3543 pbd->tcp_pseudo_csum);
3549 static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3550 struct eth_tx_parse_bd_e2 *pbd_e2,
3551 struct eth_tx_parse_2nd_bd *pbd2,
3556 u8 outerip_off, outerip_len = 0;
3558 /* from outer IP to transport */
3559 hlen_w = (skb_inner_transport_header(skb) -
3560 skb_network_header(skb)) >> 1;
3563 hlen_w += inner_tcp_hdrlen(skb) >> 1;
3565 pbd2->fw_ip_hdr_to_payload_w = hlen_w;
3567 /* outer IP header info */
3568 if (xmit_type & XMIT_CSUM_V4) {
3569 struct iphdr *iph = ip_hdr(skb);
3570 u32 csum = (__force u32)(~iph->check) -
3571 (__force u32)iph->tot_len -
3572 (__force u32)iph->frag_off;
3574 pbd2->fw_ip_csum_wo_len_flags_frag =
3575 bswab16(csum_fold((__force __wsum)csum));
3577 pbd2->fw_ip_hdr_to_payload_w =
3578 hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
3581 pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3583 pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3585 if (xmit_type & XMIT_GSO_V4) {
3586 pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
3588 pbd_e2->data.tunnel_data.pseudo_csum =
3589 bswab16(~csum_tcpudp_magic(
3590 inner_ip_hdr(skb)->saddr,
3591 inner_ip_hdr(skb)->daddr,
3592 0, IPPROTO_TCP, 0));
3594 outerip_len = ip_hdr(skb)->ihl << 1;
3596 pbd_e2->data.tunnel_data.pseudo_csum =
3597 bswab16(~csum_ipv6_magic(
3598 &inner_ipv6_hdr(skb)->saddr,
3599 &inner_ipv6_hdr(skb)->daddr,
3600 0, IPPROTO_TCP, 0));
3603 outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3607 (!!(xmit_type & XMIT_CSUM_V6) <<
3608 ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT) |
3610 ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3611 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3612 ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
3614 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3615 SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3616 pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3620 /* called with netif_tx_lock
3621 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3622 * netif_wake_queue()
3624 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3626 struct bnx2x *bp = netdev_priv(dev);
3628 struct netdev_queue *txq;
3629 struct bnx2x_fp_txdata *txdata;
3630 struct sw_tx_bd *tx_buf;
3631 struct eth_tx_start_bd *tx_start_bd, *first_bd;
3632 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
3633 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
3634 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
3635 struct eth_tx_parse_2nd_bd *pbd2 = NULL;
3636 u32 pbd_e2_parsing_data = 0;
3637 u16 pkt_prod, bd_prod;
3640 u32 xmit_type = bnx2x_xmit_type(bp, skb);
3643 __le16 pkt_size = 0;
3645 u8 mac_type = UNICAST_ADDRESS;
3647 #ifdef BNX2X_STOP_ON_ERROR
3648 if (unlikely(bp->panic))
3649 return NETDEV_TX_BUSY;
3652 txq_index = skb_get_queue_mapping(skb);
3653 txq = netdev_get_tx_queue(dev, txq_index);
3655 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
3657 txdata = &bp->bnx2x_txq[txq_index];
3659 /* enable this debug print to view the transmission queue being used
3660 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
3661 txq_index, fp_index, txdata_index); */
3663 /* enable this debug print to view the transmission details
3664 DP(NETIF_MSG_TX_QUEUED,
3665 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
3666 txdata->cid, fp_index, txdata_index, txdata, fp); */
3668 if (unlikely(bnx2x_tx_avail(bp, txdata) <
3669 skb_shinfo(skb)->nr_frags +
3671 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
3672 /* Handle special storage cases separately */
3673 if (txdata->tx_ring_size == 0) {
3674 struct bnx2x_eth_q_stats *q_stats =
3675 bnx2x_fp_qstats(bp, txdata->parent_fp);
3676 q_stats->driver_filtered_tx_pkt++;
3678 return NETDEV_TX_OK;
3680 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3681 netif_tx_stop_queue(txq);
3682 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
3684 return NETDEV_TX_BUSY;
3687 DP(NETIF_MSG_TX_QUEUED,
3688 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
3689 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
3690 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3693 eth = (struct ethhdr *)skb->data;
3695 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
3696 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3697 if (is_broadcast_ether_addr(eth->h_dest))
3698 mac_type = BROADCAST_ADDRESS;
3700 mac_type = MULTICAST_ADDRESS;
3703 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3704 /* First, check if we need to linearize the skb (due to FW
3705 restrictions). No need to check fragmentation if page size > 8K
3706 (there will be no violation of FW restrictions) */
3707 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3708 /* Statistics of linearization */
3710 if (skb_linearize(skb) != 0) {
3711 DP(NETIF_MSG_TX_QUEUED,
3712 "SKB linearization failed - silently dropping this SKB\n");
3713 dev_kfree_skb_any(skb);
3714 return NETDEV_TX_OK;
3718 /* Map skb linear data for DMA */
3719 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3720 skb_headlen(skb), DMA_TO_DEVICE);
3721 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3722 DP(NETIF_MSG_TX_QUEUED,
3723 "SKB mapping failed - silently dropping this SKB\n");
3724 dev_kfree_skb_any(skb);
3725 return NETDEV_TX_OK;
3728 Please read carefully. First we use one BD which we mark as start,
3729 then we have a parsing info BD (used for TSO or xsum),
3730 and only then we have the rest of the TSO BDs.
3731 (don't forget to mark the last one as last,
3732 and to unmap only AFTER you write to the BD ...)
3733 And above all, all pbd sizes are in words - NOT DWORDS!
3736 /* get current pkt produced now - advance it just before sending packet
3737 * since mapping of pages may fail and cause packet to be dropped
3739 pkt_prod = txdata->tx_pkt_prod;
3740 bd_prod = TX_BD(txdata->tx_bd_prod);
3742 /* get a tx_buf and first BD
3743 * tx_start_bd may be changed during SPLIT,
3744 * but first_bd will always stay first
3746 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3747 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
3748 first_bd = tx_start_bd;
3750 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
3752 /* header nbd: indirectly zero other flags! */
3753 tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
3755 /* remember the first BD of the packet */
3756 tx_buf->first_bd = txdata->tx_bd_prod;
3760 DP(NETIF_MSG_TX_QUEUED,
3761 "sending pkt %u @%p next_idx %u bd %u @%p\n",
3762 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
3764 if (vlan_tx_tag_present(skb)) {
3765 tx_start_bd->vlan_or_ethertype =
3766 cpu_to_le16(vlan_tx_tag_get(skb));
3767 tx_start_bd->bd_flags.as_bitfield |=
3768 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3770 /* when transmitting in a vf, start bd must hold the ethertype
3771 * for fw to enforce it
3774 tx_start_bd->vlan_or_ethertype =
3775 cpu_to_le16(ntohs(eth->h_proto));
3777 /* used by FW for packet accounting */
3778 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3781 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3783 /* turn on parsing and get a BD */
3784 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3786 if (xmit_type & XMIT_CSUM)
3787 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
3789 if (!CHIP_IS_E1x(bp)) {
3790 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
3791 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3793 if (xmit_type & XMIT_CSUM_ENC) {
3794 u16 global_data = 0;
3796 /* Set PBD in enc checksum offload case */
3797 hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3798 &pbd_e2_parsing_data,
3801 /* turn on 2nd parsing and get a BD */
3802 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3804 pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3806 memset(pbd2, 0, sizeof(*pbd2));
3808 pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3809 (skb_inner_network_header(skb) -
3812 if (xmit_type & XMIT_GSO_ENC)
3813 bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3817 pbd2->global_data = cpu_to_le16(global_data);
3819 /* add an additional parsing BD indication to the start BD */
3820 SET_FLAG(tx_start_bd->general_data,
3821 ETH_TX_START_BD_PARSE_NBDS, 1);
3822 /* set encapsulation flag in start BD */
3823 SET_FLAG(tx_start_bd->general_data,
3824 ETH_TX_START_BD_TUNNEL_EXIST, 1);
3826 } else if (xmit_type & XMIT_CSUM) {
3827 /* Set PBD in checksum offload case w/o encapsulation */
3828 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3829 &pbd_e2_parsing_data,
3833 /* Add the macs to the parsing BD if this is a vf */
3835 /* override GRE parameters in BD */
3836 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3837 &pbd_e2->data.mac_addr.src_mid,
3838 &pbd_e2->data.mac_addr.src_lo,
3841 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3842 &pbd_e2->data.mac_addr.dst_mid,
3843 &pbd_e2->data.mac_addr.dst_lo,
3847 SET_FLAG(pbd_e2_parsing_data,
3848 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
3850 u16 global_data = 0;
3851 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
3852 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3853 /* Set PBD in checksum offload case */
3854 if (xmit_type & XMIT_CSUM)
3855 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
3857 SET_FLAG(global_data,
3858 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
3859 pbd_e1x->global_data |= cpu_to_le16(global_data);
3862 /* Setup the data pointer of the first BD of the packet */
3863 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3864 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3865 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3866 pkt_size = tx_start_bd->nbytes;
3868 DP(NETIF_MSG_TX_QUEUED,
3869 "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
3870 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
3871 le16_to_cpu(tx_start_bd->nbytes),
3872 tx_start_bd->bd_flags.as_bitfield,
3873 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
3875 if (xmit_type & XMIT_GSO) {
3877 DP(NETIF_MSG_TX_QUEUED,
3878 "TSO packet len %d hlen %d total len %d tso size %d\n",
3879 skb->len, hlen, skb_headlen(skb),
3880 skb_shinfo(skb)->gso_size);
3882 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3884 if (unlikely(skb_headlen(skb) > hlen)) {
3886 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3890 if (!CHIP_IS_E1x(bp))
3891 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3894 bnx2x_set_pbd_gso(skb, pbd_e1x, first_bd, xmit_type);
3897 /* Set the PBD's parsing_data field if not zero
3898 * (for the chips newer than 57711).
3900 if (pbd_e2_parsing_data)
3901 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3903 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3905 /* Handle fragmented skb */
3906 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3907 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3909 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3910 skb_frag_size(frag), DMA_TO_DEVICE);
3911 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3912 unsigned int pkts_compl = 0, bytes_compl = 0;
3914 DP(NETIF_MSG_TX_QUEUED,
3915 "Unable to map page - dropping packet...\n");
3917 /* we need to unmap all buffers already mapped
3919 * first_bd->nbd needs to be properly updated
3920 * before the call to bnx2x_free_tx_pkt
3922 first_bd->nbd = cpu_to_le16(nbd);
3923 bnx2x_free_tx_pkt(bp, txdata,
3924 TX_BD(txdata->tx_pkt_prod),
3925 &pkts_compl, &bytes_compl);
3926 return NETDEV_TX_OK;
3929 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3930 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3931 if (total_pkt_bd == NULL)
3932 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3934 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3935 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3936 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
3937 le16_add_cpu(&pkt_size, skb_frag_size(frag));
3940 DP(NETIF_MSG_TX_QUEUED,
3941 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
3942 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
3943 le16_to_cpu(tx_data_bd->nbytes));
3946 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
3948 /* update with actual num BDs */
3949 first_bd->nbd = cpu_to_le16(nbd);
3951 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3953 /* now send a tx doorbell, counting the next BD
3954 * if the packet contains or ends with it
3956 if (TX_BD_POFF(bd_prod) < nbd)
3959 /* total_pkt_bytes should be set on the first data BD if
3960 * it's not an LSO packet and there is more than one
3961 * data BD. In this case pkt_size is limited by an MTU value.
3962 * However we prefer to set it for an LSO packet (while we don't
3963 * have to) in order to save some CPU cycles in a non-LSO
3964 * case, when we care much more about them.
3966 if (total_pkt_bd != NULL)
3967 total_pkt_bd->total_pkt_bytes = pkt_size;
3970 DP(NETIF_MSG_TX_QUEUED,
3971 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
3972 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
3973 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
3974 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
3975 le16_to_cpu(pbd_e1x->total_hlen_w));
3977 DP(NETIF_MSG_TX_QUEUED,
3978 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
3980 pbd_e2->data.mac_addr.dst_hi,
3981 pbd_e2->data.mac_addr.dst_mid,
3982 pbd_e2->data.mac_addr.dst_lo,
3983 pbd_e2->data.mac_addr.src_hi,
3984 pbd_e2->data.mac_addr.src_mid,
3985 pbd_e2->data.mac_addr.src_lo,
3986 pbd_e2->parsing_data);
3987 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
3989 netdev_tx_sent_queue(txq, skb->len);
3991 skb_tx_timestamp(skb);
3993 txdata->tx_pkt_prod++;
3995 * Make sure that the BD data is updated before updating the producer
3996 * since FW might read the BD right after the producer is updated.
3997 * This is only applicable for weak-ordered memory model archs such
3998 * as IA-64. The following barrier is also mandatory since FW
3999 * assumes packets must have BDs.
4003 txdata->tx_db.data.prod += nbd;
4006 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
4010 txdata->tx_bd_prod += nbd;
4012 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
4013 netif_tx_stop_queue(txq);
4015 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
4016 * ordering of set_bit() in netif_tx_stop_queue() and read of
4020 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
4021 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
4022 netif_tx_wake_queue(txq);
4026 return NETDEV_TX_OK;
4030 * bnx2x_setup_tc - routine to configure net_device for multi tc
4032 * @dev: net device to configure
4033 * @num_tc: number of traffic classes to enable
4035 * callback connected to the ndo_setup_tc function pointer
4037 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
4039 int cos, prio, count, offset;
4040 struct bnx2x *bp = netdev_priv(dev);
4042 /* setup tc must be called under rtnl lock */
4045 /* no traffic classes requested. Aborting */
4047 netdev_reset_tc(dev);
4051 /* requested to support too many traffic classes */
4052 if (num_tc > bp->max_cos) {
4053 BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
4054 num_tc, bp->max_cos);
4058 /* declare amount of supported traffic classes */
4059 if (netdev_set_num_tc(dev, num_tc)) {
4060 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
4064 /* configure priority to traffic class mapping */
4065 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
4066 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
4067 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4068 "mapping priority %d to tc %d\n",
4069 prio, bp->prio_to_cos[prio]);
4072 /* Use this configuration to differentiate tc0 from other COSes
4073 This can be used for ets or pfc, and save the effort of setting
4074 up a multi-class queue disc or negotiating DCBX with a switch
4075 netdev_set_prio_tc_map(dev, 0, 0);
4076 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
4077 for (prio = 1; prio < 16; prio++) {
4078 netdev_set_prio_tc_map(dev, prio, 1);
4079 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
4082 /* configure traffic class to transmission queue mapping */
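/* Tx queues are laid out CoS-major: CoS n owns a contiguous block of
 * BNX2X_NUM_ETH_QUEUES(bp) queues starting at n * BNX2X_NUM_NON_CNIC_QUEUES(bp),
 * which is exactly the offset/count pair handed to netdev_set_tc_queue().
 */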
4083 for (cos = 0; cos < bp->max_cos; cos++) {
4084 count = BNX2X_NUM_ETH_QUEUES(bp);
4085 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
4086 netdev_set_tc_queue(dev, cos, count, offset);
4087 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4088 "mapping tc %d to offset %d count %d\n",
4089 cos, offset, count);
4095 /* called with rtnl_lock */
4096 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4098 struct sockaddr *addr = p;
4099 struct bnx2x *bp = netdev_priv(dev);
4102 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
4103 BNX2X_ERR("Requested MAC address is not valid\n");
4107 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
4108 !is_zero_ether_addr(addr->sa_data)) {
4109 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
4113 if (netif_running(dev)) {
4114 rc = bnx2x_set_eth_mac(bp, false);
4119 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4121 if (netif_running(dev))
4122 rc = bnx2x_set_eth_mac(bp, true);
4127 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4129 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4130 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
4135 if (IS_FCOE_IDX(fp_index)) {
4136 memset(sb, 0, sizeof(union host_hc_status_block));
4137 fp->status_blk_mapping = 0;
4140 if (!CHIP_IS_E1x(bp))
4141 BNX2X_PCI_FREE(sb->e2_sb,
4142 bnx2x_fp(bp, fp_index,
4143 status_blk_mapping),
4144 sizeof(struct host_hc_status_block_e2));
4146 BNX2X_PCI_FREE(sb->e1x_sb,
4147 bnx2x_fp(bp, fp_index,
4148 status_blk_mapping),
4149 sizeof(struct host_hc_status_block_e1x));
4153 if (!skip_rx_queue(bp, fp_index)) {
4154 bnx2x_free_rx_bds(fp);
4156 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4157 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4158 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4159 bnx2x_fp(bp, fp_index, rx_desc_mapping),
4160 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4162 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4163 bnx2x_fp(bp, fp_index, rx_comp_mapping),
4164 sizeof(struct eth_fast_path_rx_cqe) *
4168 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4169 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4170 bnx2x_fp(bp, fp_index, rx_sge_mapping),
4171 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4175 if (!skip_tx_queue(bp, fp_index)) {
4176 /* fastpath tx rings: tx_buf tx_desc */
4177 for_each_cos_in_tx_queue(fp, cos) {
4178 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4180 DP(NETIF_MSG_IFDOWN,
4181 "freeing tx memory of fp %d cos %d cid %d\n",
4182 fp_index, cos, txdata->cid);
4184 BNX2X_FREE(txdata->tx_buf_ring);
4185 BNX2X_PCI_FREE(txdata->tx_desc_ring,
4186 txdata->tx_desc_mapping,
4187 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4190 /* end of fastpath */
4193 void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
4196 for_each_cnic_queue(bp, i)
4197 bnx2x_free_fp_mem_at(bp, i);
4200 void bnx2x_free_fp_mem(struct bnx2x *bp)
4203 for_each_eth_queue(bp, i)
4204 bnx2x_free_fp_mem_at(bp, i);
4207 static void set_sb_shortcuts(struct bnx2x *bp, int index)
4209 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
4210 if (!CHIP_IS_E1x(bp)) {
4211 bnx2x_fp(bp, index, sb_index_values) =
4212 (__le16 *)status_blk.e2_sb->sb.index_values;
4213 bnx2x_fp(bp, index, sb_running_index) =
4214 (__le16 *)status_blk.e2_sb->sb.running_index;
4216 bnx2x_fp(bp, index, sb_index_values) =
4217 (__le16 *)status_blk.e1x_sb->sb.index_values;
4218 bnx2x_fp(bp, index, sb_running_index) =
4219 (__le16 *)status_blk.e1x_sb->sb.running_index;
4223 /* Returns the number of actually allocated BDs */
4224 static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4227 struct bnx2x *bp = fp->bp;
4228 u16 ring_prod, cqe_ring_prod;
4229 int i, failure_cnt = 0;
4231 fp->rx_comp_cons = 0;
4232 cqe_ring_prod = ring_prod = 0;
4234 /* This routine is called only during init, so
4235 * fp->eth_q_stats.rx_skb_alloc_failed = 0
4237 for (i = 0; i < rx_ring_size; i++) {
4238 if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
4242 ring_prod = NEXT_RX_IDX(ring_prod);
4243 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4244 WARN_ON(ring_prod <= (i - failure_cnt));
4248 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4249 i - failure_cnt, fp->index);
4251 fp->rx_bd_prod = ring_prod;
4252 /* Limit the CQE producer by the CQE ring size */
4253 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4255 fp->rx_pkt = fp->rx_calls = 0;
4257 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
4259 return i - failure_cnt;
4262 static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4266 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4267 struct eth_rx_cqe_next_page *nextpg;
4269 nextpg = (struct eth_rx_cqe_next_page *)
4270 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4272 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4273 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4275 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4276 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
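/* The last CQE of every page acts as this next-page pointer; the modulo
 * wraps the final page back to page 0 so the CQ pages form a ring.
 */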
4280 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4282 union host_hc_status_block *sb;
4283 struct bnx2x_fastpath *fp = &bp->fp[index];
4286 int rx_ring_size = 0;
4288 if (!bp->rx_ring_size &&
4289 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
4290 rx_ring_size = MIN_RX_SIZE_NONTPA;
4291 bp->rx_ring_size = rx_ring_size;
4292 } else if (!bp->rx_ring_size) {
4293 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4295 if (CHIP_IS_E3(bp)) {
4296 u32 cfg = SHMEM_RD(bp,
4297 dev_info.port_hw_config[BP_PORT(bp)].
4300 /* Decrease ring size for 1G functions */
4301 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4302 PORT_HW_CFG_NET_SERDES_IF_SGMII)
4306 /* allocate at least the number of buffers required by FW */
4307 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4308 MIN_RX_SIZE_TPA, rx_ring_size);
4310 bp->rx_ring_size = rx_ring_size;
4311 } else /* if rx_ring_size specified - use it */
4312 rx_ring_size = bp->rx_ring_size;
4314 DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
4317 sb = &bnx2x_fp(bp, index, status_blk);
4319 if (!IS_FCOE_IDX(index)) {
4321 if (!CHIP_IS_E1x(bp))
4322 BNX2X_PCI_ALLOC(sb->e2_sb,
4323 &bnx2x_fp(bp, index, status_blk_mapping),
4324 sizeof(struct host_hc_status_block_e2));
4326 BNX2X_PCI_ALLOC(sb->e1x_sb,
4327 &bnx2x_fp(bp, index, status_blk_mapping),
4328 sizeof(struct host_hc_status_block_e1x));
4331 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4332 * set shortcuts for it.
4334 if (!IS_FCOE_IDX(index))
4335 set_sb_shortcuts(bp, index);
4338 if (!skip_tx_queue(bp, index)) {
4339 /* fastpath tx rings: tx_buf tx_desc */
4340 for_each_cos_in_tx_queue(fp, cos) {
4341 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4344 "allocating tx memory of fp %d cos %d\n",
4347 BNX2X_ALLOC(txdata->tx_buf_ring,
4348 sizeof(struct sw_tx_bd) * NUM_TX_BD);
4349 BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
4350 &txdata->tx_desc_mapping,
4351 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4356 if (!skip_rx_queue(bp, index)) {
4357 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4358 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
4359 sizeof(struct sw_rx_bd) * NUM_RX_BD);
4360 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
4361 &bnx2x_fp(bp, index, rx_desc_mapping),
4362 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4364 /* Seed all CQEs by 1s */
4365 BNX2X_PCI_FALLOC(bnx2x_fp(bp, index, rx_comp_ring),
4366 &bnx2x_fp(bp, index, rx_comp_mapping),
4367 sizeof(struct eth_fast_path_rx_cqe) *
4371 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
4372 sizeof(struct sw_rx_page) * NUM_RX_SGE);
4373 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
4374 &bnx2x_fp(bp, index, rx_sge_mapping),
4375 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4377 bnx2x_set_next_page_rx_bd(fp);
4380 bnx2x_set_next_page_rx_cq(fp);
4383 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4384 if (ring_size < rx_ring_size)
4390 /* handles low memory cases */
4392 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4394 /* FW will drop all packets if the queue is not big enough.
4395 * In these cases we disable the queue.
4396 * Min size is different for OOO, TPA and non-TPA queues.
4398 if (ring_size < (fp->disable_tpa ?
4399 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
4400 /* release memory allocated for this queue */
4401 bnx2x_free_fp_mem_at(bp, index);
4407 int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
4411 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
4412 /* we will fail load process instead of mark
4420 int bnx2x_alloc_fp_mem(struct bnx2x *bp)
4424 /* 1. Allocate FP for leading - fatal if error
4425 * 2. Allocate RSS - fix number of queues if error
4429 if (bnx2x_alloc_fp_mem_at(bp, 0))
4433 for_each_nondefault_eth_queue(bp, i)
4434 if (bnx2x_alloc_fp_mem_at(bp, i))
4437 /* handle memory failures */
4438 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4439 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4442 bnx2x_shrink_eth_fp(bp, delta);
4443 if (CNIC_SUPPORT(bp))
4444 /* move non-eth FPs next to the last eth FP;
4445 * must be done in that order:
4446 * FCOE_IDX < FWD_IDX < OOO_IDX
4449 /* move FCoE fp even if NO_FCOE_FLAG is on */
4450 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4451 bp->num_ethernet_queues -= delta;
4452 bp->num_queues = bp->num_ethernet_queues +
4453 bp->num_cnic_queues;
4454 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4455 bp->num_queues + delta, bp->num_queues);
void bnx2x_free_mem_bp(struct bnx2x *bp)
{
	int i;

	for (i = 0; i < bp->fp_array_size; i++)
		kfree(bp->fp[i].tpa_info);
	kfree(bp->fp);
	kfree(bp->sp_objs);
	kfree(bp->fp_stats);
	kfree(bp->bnx2x_txq);
	kfree(bp->msix_table);
	kfree(bp->ilt);
}

int bnx2x_alloc_mem_bp(struct bnx2x *bp)
{
	struct bnx2x_fastpath *fp;
	struct msix_entry *tbl;
	struct bnx2x_ilt *ilt;
	int msix_table_size = 0;
	int fp_array_size, txq_array_size;
	int i;

	/* The biggest MSI-X table we might need is as a maximum number of fast
	 * path IGU SBs plus default SB (for PF only).
	 */
	msix_table_size = bp->igu_sb_cnt;
	if (IS_PF(bp))
		msix_table_size++;
	BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);

	/* fp array: RSS plus CNIC related L2 queues */
	fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
	bp->fp_array_size = fp_array_size;
	BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);

	fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
	if (!fp)
		goto alloc_err;
	for (i = 0; i < bp->fp_array_size; i++) {
		fp[i].tpa_info =
			kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
				sizeof(struct bnx2x_agg_info), GFP_KERNEL);
		if (!(fp[i].tpa_info))
			goto alloc_err;
	}
	bp->fp = fp;

	/* allocate sp objs */
	bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
			      GFP_KERNEL);
	if (!bp->sp_objs)
		goto alloc_err;

	/* allocate fp_stats */
	bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
			       GFP_KERNEL);
	if (!bp->fp_stats)
		goto alloc_err;

	/* Allocate memory for the transmission queues array */
	txq_array_size =
		BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
	BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
	bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
				GFP_KERNEL);
	if (!bp->bnx2x_txq)
		goto alloc_err;

	/* msix table */
	tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
	if (!tbl)
		goto alloc_err;
	bp->msix_table = tbl;

	/* ilt */
	ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
	if (!ilt)
		goto alloc_err;
	bp->ilt = ilt;

	return 0;
alloc_err:
	bnx2x_free_mem_bp(bp);
	return -ENOMEM;
}

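/* bnx2x_alloc_mem_bp() funnels every failure into bnx2x_free_mem_bp() through
 * the alloc_err label; since kfree(NULL) is a no-op, pointer members that were
 * never allocated are released harmlessly.  The MSI-X table is sized for one
 * vector per fastpath IGU status block plus one extra slot for the PF's
 * default (slowpath) status block.
 */
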
int bnx2x_reload_if_running(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (unlikely(!netif_running(dev)))
		return 0;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
	return bnx2x_nic_load(bp, LOAD_NORMAL);
}

int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = 0;

	if (bp->link_params.num_phys <= 1)
		return INT_PHY;

	if (bp->link_vars.link_up) {
		sel_phy_idx = EXT_PHY1;
		/* In case link is SERDES, check if the EXT_PHY2 is the one */
		if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
		    (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
			sel_phy_idx = EXT_PHY2;
	} else {
		switch (bnx2x_phy_selection(&bp->link_params)) {
		case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY1;
			break;
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY2;
			break;
		}
	}

	return sel_phy_idx;
}

int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);

	/* The selected activated PHY is always after swapping (in case PHY
	 * swapping is enabled). So when swapping is enabled, we need to
	 * reverse the configuration.
	 */
	if (bp->link_params.multi_phy_config &
	    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
		if (sel_phy_idx == EXT_PHY1)
			sel_phy_idx = EXT_PHY2;
		else if (sel_phy_idx == EXT_PHY2)
			sel_phy_idx = EXT_PHY1;
	}
	return LINK_CONFIG_IDX(sel_phy_idx);
}

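/* Example: with PHY swapping enabled and EXT_PHY1 reported as the active PHY
 * by bnx2x_get_cur_phy_idx(), the link configuration that actually describes
 * it is the EXT_PHY2 entry, so the index is reversed before being translated
 * by LINK_CONFIG_IDX().
 */
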
#ifdef NETDEV_FCOE_WWNN
int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	switch (type) {
	case NETDEV_FCOE_WWNN:
		*wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
				cp->fcoe_wwn_node_name_lo);
		break;
	case NETDEV_FCOE_WWPN:
		*wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
				cp->fcoe_wwn_port_name_lo);
		break;
	default:
		BNX2X_ERR("Wrong WWN type requested - %d\n", type);
		return -EINVAL;
	}

	return 0;
}
#endif

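/* HILO_U64() simply glues the two 32-bit halves from the cnic_eth_dev into a
 * 64-bit WWN, e.g. hi = 0x20000000 and lo = 0x0c9f1234 yield
 * 0x200000000c9f1234 (example values only).
 */
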
/* called with rtnl_lock */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		BNX2X_ERR("Can't perform change MTU during parity recovery\n");
		return -EAGAIN;
	}

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
		BNX2X_ERR("Can't support requested MTU size\n");
		return -EINVAL;
	}

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	return bnx2x_reload_if_running(dev);
}

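/* The bounds check above accepts any MTU whose resulting frame fits the
 * controller: no larger than ETH_MAX_JUMBO_PACKET_SIZE and no smaller than
 * ETH_MIN_PACKET_SIZE minus the Ethernet header.  The new value takes effect
 * on the next load, which bnx2x_reload_if_running() triggers immediately when
 * the interface is up.
 */
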
netdev_features_t bnx2x_fix_features(struct net_device *dev,
				     netdev_features_t features)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* TPA requires Rx CSUM offloading */
	if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
		features &= ~NETIF_F_LRO;
		features &= ~NETIF_F_GRO;
	}

	return features;
}

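/* ndo_fix_features runs before ndo_set_features, so when RX checksumming is
 * turned off (or TPA is globally disabled) the stack never asks
 * bnx2x_set_features() to keep LRO/GRO enabled; both are masked out here
 * first.
 */
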
int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 flags = bp->flags;
	u32 changes;
	bool bnx2x_reload = false;

	if (features & NETIF_F_LRO)
		flags |= TPA_ENABLE_FLAG;
	else
		flags &= ~TPA_ENABLE_FLAG;

	if (features & NETIF_F_GRO)
		flags |= GRO_ENABLE_FLAG;
	else
		flags &= ~GRO_ENABLE_FLAG;

	if (features & NETIF_F_LOOPBACK) {
		if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
			bp->link_params.loopback_mode = LOOPBACK_BMAC;
			bnx2x_reload = true;
		}
	} else {
		if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
			bp->link_params.loopback_mode = LOOPBACK_NONE;
			bnx2x_reload = true;
		}
	}

	changes = flags ^ bp->flags;

	/* if GRO is changed while LRO is enabled, don't force a reload */
	if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG))
		changes &= ~GRO_ENABLE_FLAG;

	if (changes)
		bnx2x_reload = true;

	bp->flags = flags;

	if (bnx2x_reload) {
		if (bp->recovery_state == BNX2X_RECOVERY_DONE)
			return bnx2x_reload_if_running(dev);
		/* else: bnx2x_nic_load() will be called at end of recovery */
	}

	return 0;
}

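/* Example of the GRO/LRO interaction above: toggling NETIF_F_GRO while LRO
 * (TPA_ENABLE_FLAG) stays set changes only GRO_ENABLE_FLAG, which is masked
 * out of 'changes', so no reload is forced; with LRO-style aggregation
 * already active, the GRO bit only takes effect once LRO is turned off.
 */
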
void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif

	smp_mb__before_clear_bit();
	set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
	smp_mb__after_clear_bit();

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_delayed_work(&bp->sp_rtnl_task, 0);
}

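/* Deferring the actual reset to sp_rtnl_task lets it run in process context
 * under rtnl_lock; the memory barriers around set_bit() pair with the reader
 * in the slowpath task so the TX_TIMEOUT request is not missed.
 */
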
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		BNX2X_ERR("Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}

void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
			      u32 cid)
{
	if (!cxt) {
		BNX2X_ERR("bad context pointer %p\n", cxt);
		return;
	}

	/* ustorm cxt validation */
	cxt->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
			CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
	/* xcontext validation */
	cxt->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
			CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
}

static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
				    u8 fw_sb_id, u8 sb_index,
				    u8 ticks)
{
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);

	REG_WR8(bp, addr, ticks);
	DP(NETIF_MSG_IFUP,
	   "port %x fw_sb_id %d sb_index %d ticks %d\n",
	   port, fw_sb_id, sb_index, ticks);
}

static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
				    u16 fw_sb_id, u8 sb_index,
				    u8 disable)
{
	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
	u8 flags = REG_RD8(bp, addr);

	/* clear and set */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= enable_flag;
	REG_WR8(bp, addr, flags);
	DP(NETIF_MSG_IFUP,
	   "port %x fw_sb_id %d sb_index %d disable %d\n",
	   port, fw_sb_id, sb_index, disable);
}

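/* storm_memset_hc_disable() does a read-modify-write of the single flags byte
 * so that only HC_INDEX_DATA_HC_ENABLED changes; the other per-index flags
 * kept in the same byte are preserved.
 */
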
void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
				    u8 sb_index, u8 disable, u16 usec)
{
	int port = BP_PORT(bp);
	u8 ticks = usec / BNX2X_BTR;

	storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);

	disable = disable ? 1 : (usec ? 0 : 1);
	storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
}

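/* Worked example for bnx2x_update_coalesce_sb_index() (BNX2X_BTR assumed to
 * be the 4us host-coalescing base tick used by this driver):
 *
 *	bnx2x_update_coalesce_sb_index(bp, fw_sb_id, sb_index, 0, 100);
 *
 * writes ticks = 100 / 4 = 25 and leaves the index enabled, while usec == 0
 * forces disable to 1, turning host coalescing off for that index regardless
 * of the 'disable' argument.
 */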