/* bnx2x_cmn.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2013 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <net/busy_poll.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"
#include "bnx2x_sp.h"

static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
static int bnx2x_poll(struct napi_struct *napi, int budget);
static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
{
	int i;

	/* Add NAPI objects */
	for_each_rx_queue_cnic(bp, i) {
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, NAPI_POLL_WEIGHT);
		napi_hash_add(&bnx2x_fp(bp, i, napi));
	}
}
static void bnx2x_add_all_napi(struct bnx2x *bp)
{
	int i;

	/* Add NAPI objects */
	for_each_eth_queue(bp, i) {
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, NAPI_POLL_WEIGHT);
		napi_hash_add(&bnx2x_fp(bp, i, napi));
	}
}
static int bnx2x_calc_num_queues(struct bnx2x *bp)
{
	int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();

	/* Reduce memory usage in kdump environment by using only one queue */
	if (is_kdump_kernel())
		nq = 1;

	nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
	return nq;
}
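/* Illustrative example (values assumed, not taken from the driver headers):
 * with bnx2x_num_queues = 0 on an 8-CPU host,
 * netif_get_num_default_rss_queues() typically returns 8; if
 * BNX2X_MAX_QUEUES(bp) evaluates to 4 for the device, then
 *
 *	nq = clamp(8, 1, 4);	// nq == 4
 *
 * so the module parameter, the RSS default and the HW limit all cap the
 * final queue count.
 */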
/**
 * bnx2x_move_fp - move content of the fastpath structure.
 *
 * @bp:		driver handle
 * @from:	source FP index
 * @to:		destination FP index
 *
 * Makes sure the contents of bp->fp[to].napi are kept
 * intact. This is done by first copying the napi struct from
 * the target to the source, and then mem-copying the entire
 * source onto the target. Update txdata pointers and related
 * content.
 */
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
	struct bnx2x_fastpath *from_fp = &bp->fp[from];
	struct bnx2x_fastpath *to_fp = &bp->fp[to];
	struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
	struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
	struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
	struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
	int old_max_eth_txqs, new_max_eth_txqs;
	int old_txdata_index = 0, new_txdata_index = 0;
	struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;

	/* Copy the NAPI object as it has been already initialized */
	from_fp->napi = to_fp->napi;

	/* Move bnx2x_fastpath contents */
	memcpy(to_fp, from_fp, sizeof(*to_fp));
	to_fp->index = to;

	/* Retain the tpa_info of the original `to' version as we don't want
	 * 2 FPs to contain the same tpa_info pointer.
	 */
	to_fp->tpa_info = old_tpa_info;

	/* move sp_objs contents as well, as their indices match fp ones */
	memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));

	/* move fp_stats contents as well, as their indices match fp ones */
	memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));

	/* Update txdata pointers in fp and move txdata content accordingly:
	 * Each fp consumes 'max_cos' txdata structures, so the index should be
	 * decremented by max_cos x delta.
	 */

	old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
	new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
				(bp)->max_cos;
	if (from == FCOE_IDX(bp)) {
		old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
		new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
	}

	memcpy(&bp->bnx2x_txq[new_txdata_index],
	       &bp->bnx2x_txq[old_txdata_index],
	       sizeof(struct bnx2x_fp_txdata));
	to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
}
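/* Illustrative index arithmetic (values assumed): with 8 ETH queues,
 * max_cos = 3 and the FCoE fastpath moved from index 8 to index 6
 * (delta = 2), the FCoE txdata slot moves from
 *
 *	old = 8 * 3 + FCOE_TXQ_IDX_OFFSET = 24 + FCOE_TXQ_IDX_OFFSET
 * to
 *	new = (8 - 8 + 6) * 3 + FCOE_TXQ_IDX_OFFSET = 18 + FCOE_TXQ_IDX_OFFSET,
 *
 * i.e. the index drops by exactly max_cos * delta, as the comment above
 * describes.
 */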
/**
 * bnx2x_fill_fw_str - Fill buffer with FW version string.
 *
 * @bp:        driver handle
 * @buf:       character buffer to fill with the fw name
 * @buf_len:   length of the above buffer
 *
 */
void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
{
	if (IS_PF(bp)) {
		u8 phy_fw_ver[PHY_FW_VER_LEN];

		phy_fw_ver[0] = '\0';
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     phy_fw_ver, PHY_FW_VER_LEN);
		strlcpy(buf, bp->fw_ver, buf_len);
		snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
			 "bc %d.%d.%d%s%s",
			 (bp->common.bc_ver & 0xff0000) >> 16,
			 (bp->common.bc_ver & 0xff00) >> 8,
			 (bp->common.bc_ver & 0xff),
			 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
	} else {
		bnx2x_vf_fill_fw_str(bp, buf, buf_len);
	}
}
/**
 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
 *
 * @bp:		driver handle
 * @delta:	number of eth queues which were not allocated
 */
static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
{
	int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);

	/* Queue pointer cannot be re-set on an fp-basis, as moving pointer
	 * backward along the array could cause memory to be overridden
	 */
	for (cos = 1; cos < bp->max_cos; cos++) {
		for (i = 0; i < old_eth_num - delta; i++) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
			int new_idx = cos * (old_eth_num - delta) + i;

			memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
			       sizeof(struct bnx2x_fp_txdata));
			fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
		}
	}
}
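/* Illustrative compaction (values assumed): old_eth_num = 8, delta = 2,
 * max_cos = 2. For cos 1, queue i, the txdata entry moves from slot
 * 1 * 8 + i down to slot 1 * (8 - 2) + i. Since the destination index is
 * always lower than the source for cos >= 1 and i ascends, entries are
 * copied towards the start of the array without ever overwriting data
 * that has not been moved yet - which is why the walk above is done
 * per-cos from index 0 upward.
 */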
int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
			     u16 idx, unsigned int *pkts_compl,
			     unsigned int *bytes_compl)
{
	struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;
	u16 split_bd_len = 0;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d  buff @(%p)->skb %p\n",
	   txdata->txq_index, idx, tx_buf, skb);

	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
		/* Skip second parse bd... */
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		split_bd_len = BD_UNMAP_LEN(tx_data_bd);
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* unmap first bd */
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
			 DMA_TO_DEVICE);

	/* now free frags */
	while (nbd > 0) {
		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	if (likely(skb)) {
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
	}

	dev_kfree_skb_any(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
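/* Illustrative BD chain for one packet (layout inferred from the walk
 * above, counts assumed): start_bd, a parse bd (plus a second parse bd
 * when BNX2X_HAS_SECOND_PBD is set), an optional split-header bd when
 * BNX2X_TSO_SPLIT_BD is set, then the frag data bds. nbd counts every
 * bd except the start_bd, so after the parse/split bds are skipped the
 * while loop unmaps exactly the remaining data bds and new_cons lands
 * one past the packet's last bd.
 */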
int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
{
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
	unsigned int pkts_compl = 0, bytes_compl = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
	sw_cons = txdata->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		DP(NETIF_MSG_TX_DONE,
		   "queue[%d]: hw_cons %u  sw_cons %u  pkt_cons %u\n",
		   txdata->txq_index, hw_cons, sw_cons, pkt_cons);

		bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
					    &pkts_compl, &bytes_compl);

		sw_cons++;
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	txdata->tx_pkt_cons = sw_cons;
	txdata->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent re-enabling the queue
		 * while it's empty. This could have happened if rx_action() gets
		 * suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}
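/* Sketch of the assumed pairing (not taken from this file): the producer
 * side in bnx2x_start_xmit() is expected to do the mirror-image sequence -
 * stop the queue, issue smp_mb(), then re-check tx_avail - so that either
 * the producer sees the fresh tx_bd_cons written above, or this consumer
 * side sees the stopped-queue bit and re-checks under __netif_tx_lock().
 */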
static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
					 u16 sge_len,
					 struct eth_end_agg_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		BIT_VEC64_CLEAR_BIT(fp->sge_mask,
			RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp,
		le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
		delta += BIT_VEC64_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
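/* Illustrative mask walk (values assumed): with BIT_VEC64_ELEM_SZ = 64,
 * each sge_mask element tracks 64 SGE entries. If first_elem = 2 and the
 * loop above finds elements 2 and 3 fully consumed (mask == 0), both are
 * re-armed to BIT_VEC64_ELEM_ONE_MASK and the producer advances by
 *
 *	delta = 2 * 64 = 128 entries,
 *
 * stopping at the first element that still has unconsumed SGEs.
 */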
/* Get Toeplitz hash value in the skb using the value from the
 * CQE (calculated by HW).
 */
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
			    const struct eth_fast_path_rx_cqe *cqe,
			    enum pkt_hash_types *rxhash_type)
{
	/* Get Toeplitz hash from CQE */
	if ((bp->dev->features & NETIF_F_RXHASH) &&
	    (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
		enum eth_rss_hash_type htype;

		htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
		*rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
				(htype == TCP_IPV6_HASH_TYPE)) ?
			       PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;

		return le32_to_cpu(cqe->rss_hash_result);
	}
	*rxhash_type = PKT_HASH_TYPE_NONE;
	return 0;
}
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    u16 cons, u16 prod,
			    struct eth_fast_path_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;
	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
	struct sw_rx_bd *first_buf = &tpa_info->first_buf;

	/* print error if current state != stop */
	if (tpa_info->tpa_state != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	/* Try to map an empty data buffer from the aggregation info */
	mapping = dma_map_single(&bp->pdev->dev,
				 first_buf->data + NET_SKB_PAD,
				 fp->rx_buf_size, DMA_FROM_DEVICE);
	/*
	 * ...if it fails - move the skb from the consumer to the producer
	 * and set the current aggregation state as ERROR to drop it
	 * when TPA_STOP arrives.
	 */

	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		/* Move the BD from the consumer to the producer */
		bnx2x_reuse_rx_data(fp, cons, prod);
		tpa_info->tpa_state = BNX2X_TPA_ERROR;
		return;
	}

	/* move empty data from pool to prod */
	prod_rx_buf->data = first_buf->data;
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
	/* point prod_bd to new data */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	/* move partial skb from cons to pool (don't unmap yet) */
	*first_buf = *cons_rx_buf;

	/* mark bin state as START */
	tpa_info->parsing_flags =
		le16_to_cpu(cqe->pars_flags.flags);
	tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
	tpa_info->tpa_state = BNX2X_TPA_START;
	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
	tpa_info->placement_offset = cqe->placement_offset;
	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
	if (fp->mode == TPA_MODE_GRO) {
		u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
		tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
		tpa_info->gro_size = gro_size;
	}

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef _ASM_GENERIC_INT_L64_H
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}
/* Timestamp option length allowed for TPA aggregation:
 *
 *		nop nop kind length echo val
 */
#define TPA_TSTAMP_OPT_LEN	12
/**
 * bnx2x_set_gro_params - compute GRO values
 *
 * @skb:		packet skb
 * @parsing_flags:	parsing flags from the START CQE
 * @len_on_bd:		total length of the first packet for the
 *			aggregation.
 * @pkt_len:		length of all segments
 *
 * Approximate value of the MSS for this aggregation calculated using
 * the first packet of it.
 * Compute number of aggregated segments, and gso_type.
 */
static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
				 u16 len_on_bd, unsigned int pkt_len,
				 u16 num_of_coalesced_segs)
{
	/* TPA aggregation won't have either IP options or TCP options
	 * other than timestamp or IPv6 extension headers.
	 */
	u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);

	if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
	    PRS_FLAG_OVERETH_IPV6) {
		hdrs_len += sizeof(struct ipv6hdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
	} else {
		hdrs_len += sizeof(struct iphdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
	}

	/* Check if there was a TCP timestamp; if there is one, it will
	 * always be 12 bytes long: nop nop kind length echo val.
	 *
	 * Otherwise FW would close the aggregation.
	 */
	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
		hdrs_len += TPA_TSTAMP_OPT_LEN;

	skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;

	/* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
	 * to skb_shinfo(skb)->gso_segs
	 */
	NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
}
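/* Illustrative gso_size computation (values assumed): for an IPv4
 * aggregation whose first frame carries len_on_bd = 1514 and no
 * timestamp option,
 *
 *	hdrs_len = ETH_HLEN (14) + sizeof(struct iphdr) (20) +
 *		   sizeof(struct tcphdr) (20) = 54
 *	gso_size = 1514 - 54 = 1460,
 *
 * i.e. the standard Ethernet MSS; with the 12-byte timestamp option the
 * result drops to 1448.
 */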
static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			      u16 index, gfp_t gfp_mask)
{
	struct page *page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL)) {
		BNX2X_ERR("Can't alloc sge\n");
		return -ENOMEM;
	}

	mapping = dma_map_page(&bp->pdev->dev, page, 0,
			       SGE_PAGES, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		BNX2X_ERR("Can't map sge\n");
		return -ENOMEM;
	}

	sw_buf->page = page;
	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct bnx2x_agg_info *tpa_info,
			       u16 pages,
			       struct sk_buff *skb,
			       struct eth_end_agg_rx_cqe *cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u32 i, frag_len, frag_size;
	int err, j, frag_id = 0;
	u16 len_on_bd = tpa_info->len_on_bd;
	u16 full_page = 0, gro_size = 0;

	frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;

	if (fp->mode == TPA_MODE_GRO) {
		gro_size = tpa_info->gro_size;
		full_page = tpa_info->full_page;
	}

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
				     le16_to_cpu(cqe->pkt_len),
				     le16_to_cpu(cqe->num_of_coalesced_segs));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		if (fp->mode == TPA_MODE_GRO)
			frag_len = min_t(u32, frag_size, (u32)full_page);
		else /* LRO */
			frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);

		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
		if (unlikely(err)) {
			bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we're going to pass it to the stack */
		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGES, DMA_FROM_DEVICE);
		/* Add one frag and update the appropriate fields in the skb */
		if (fp->mode == TPA_MODE_LRO)
			skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
		else { /* GRO */
			int rem;
			int offset = 0;
			for (rem = frag_len; rem > 0; rem -= gro_size) {
				int len = rem > gro_size ? gro_size : rem;
				skb_fill_page_desc(skb, frag_id++,
						   old_rx_pg.page, offset, len);
				if (offset)
					get_page(old_rx_pg.page);
				offset += len;
			}
		}

		skb->data_len += frag_len;
		skb->truesize += SGE_PAGES;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}
static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
{
	if (fp->rx_frag_size)
		put_page(virt_to_head_page(data));
	else
		kfree(data);
}

static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
{
	if (fp->rx_frag_size) {
		/* GFP_KERNEL allocations are used only during initialization */
		if (unlikely(gfp_mask & __GFP_WAIT))
			return (void *)__get_free_page(gfp_mask);

		return netdev_alloc_frag(fp->rx_frag_size);
	}

	return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
}
#ifdef CONFIG_INET
static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct iphdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
}

static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
{
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
}

static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
			   void (*gro_func)(struct bnx2x*, struct sk_buff*))
{
	skb_set_network_header(skb, 0);
	gro_func(bp, skb);
	tcp_gro_complete(skb);
}
#endif

static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			      struct sk_buff *skb)
{
#ifdef CONFIG_INET
	if (skb_shinfo(skb)->gso_size) {
		switch (be16_to_cpu(skb->protocol)) {
		case ETH_P_IP:
			bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
			break;
		case ETH_P_IPV6:
			bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
			break;
		default:
			BNX2X_ERR("Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
				  be16_to_cpu(skb->protocol));
		}
	}
#endif
	skb_record_rx_queue(skb, fp->rx_queue);
	napi_gro_receive(&fp->napi, skb);
}
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   struct bnx2x_agg_info *tpa_info,
			   u16 pages,
			   struct eth_end_agg_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
	u8 pad = tpa_info->placement_offset;
	u16 len = tpa_info->len_on_bd;
	struct sk_buff *skb = NULL;
	u8 *new_data, *data = rx_buf->data;
	u8 old_tpa_state = tpa_info->tpa_state;

	tpa_info->tpa_state = BNX2X_TPA_STOP;

	/* If there was an error during the handling of the TPA_START -
	 * drop this aggregation.
	 */
	if (old_tpa_state == BNX2X_TPA_ERROR)
		goto drop;

	/* Try to allocate the new data */
	new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 fp->rx_buf_size, DMA_FROM_DEVICE);
	if (likely(new_data))
		skb = build_skb(data, fp->rx_frag_size);

	if (likely(skb)) {
#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > fp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail...  pad %d  len %d  rx_buf_size %d\n",
				  pad, len, fp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad + NET_SKB_PAD);
		skb_put(skb, len);
		skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
					 skb, cqe, cqe_idx)) {
			if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
			bnx2x_gro_receive(bp, fp, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS,
			   "Failed to allocate new pages - dropping packet!\n");
			dev_kfree_skb_any(skb);
		}

		/* put new data in bin */
		rx_buf->data = new_data;

		return;
	}
	if (new_data)
		bnx2x_frag_free(fp, new_data);
drop:
	/* drop the packet and keep the buffer in the bin */
	DP(NETIF_MSG_RX_STATUS,
	   "Failed to allocate or map a new skb - dropping packet!\n");
	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
}
static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       u16 index, gfp_t gfp_mask)
{
	u8 *data;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	data = bnx2x_frag_alloc(fp, gfp_mask);
	if (unlikely(data == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
				 fp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		bnx2x_frag_free(fp, data);
		BNX2X_ERR("Can't map rx data\n");
		return -ENOMEM;
	}

	rx_buf->data = data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
static
void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
			 struct bnx2x_fastpath *fp,
			 struct bnx2x_eth_q_stats *qstats)
{
	/* Do nothing if no L4 csum validation was done.
	 * We do not check whether IP csum was validated. For IPv4 we assume
	 * that if the card got as far as validating the L4 csum, it also
	 * validated the IP csum. IPv6 has no IP csum.
	 */
	if (cqe->fast_path_cqe.status_flags &
	    ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
		return;

	/* If L4 validation was done, check if an error was found. */

	if (cqe->fast_path_cqe.type_error_flags &
	    (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
	     ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
		qstats->hw_csum_err++;
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;
	union eth_rx_cqe *cqe;
	struct eth_fast_path_rx_cqe *cqe_fp;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif
	if (budget <= 0)
		return rx_pkt;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	comp_ring_cons = RCQ_BD(sw_comp_cons);
	cqe = &fp->rx_comp_ring[comp_ring_cons];
	cqe_fp = &cqe->fast_path_cqe;

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);

	while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		u8 cqe_fp_flags;
		enum eth_rx_cqe_type cqe_fp_type;
		u16 len, pad, queue;
		u8 *data;
		u32 rxhash;
		enum pkt_hash_types rxhash_type;

#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic))
			return 0;
#endif

		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		/* A rmb() is required to ensure that the CQE is not read
		 * before it is written by the adapter DMA. PCI ordering
		 * rules will make sure the other fields are written before
		 * the marker at the end of struct eth_fast_path_rx_cqe
		 * but without rmb() a weakly ordered processor can process
		 * stale data. Without the barrier TPA state-machine might
		 * enter inconsistent state and kernel stack might be
		 * provided with incorrect packet description - these lead
		 * to various kernel crashes.
		 */
		rmb();

		cqe_fp_flags = cqe_fp->type_error_flags;
		cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;

		DP(NETIF_MSG_RX_STATUS,
		   "CQE type %x  err %x  status %x  queue %x  vlan %x  len %u\n",
		   CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe_fp->status_flags,
		   le32_to_cpu(cqe_fp->rss_hash_result),
		   le16_to_cpu(cqe_fp->vlan_tag),
		   le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;
		}

		rx_buf = &fp->rx_buf_ring[bd_cons];
		data = rx_buf->data;

		if (!CQE_TYPE_FAST(cqe_fp_type)) {
			struct bnx2x_agg_info *tpa_info;
			u16 frag_size, pages;
#ifdef BNX2X_STOP_ON_ERROR
			/* sanity check */
			if (fp->disable_tpa &&
			    (CQE_TYPE_START(cqe_fp_type) ||
			     CQE_TYPE_STOP(cqe_fp_type)))
				BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
					  CQE_TYPE(cqe_fp_type));
#endif

			if (CQE_TYPE_START(cqe_fp_type)) {
				u16 queue = cqe_fp->queue_index;
				DP(NETIF_MSG_RX_STATUS,
				   "calling tpa_start on queue %d\n",
				   queue);

				bnx2x_tpa_start(fp, queue,
						bd_cons, bd_prod,
						cqe_fp);

				goto next_rx;
			}
			queue = cqe->end_agg_cqe.queue_index;
			tpa_info = &fp->tpa_info[queue];
			DP(NETIF_MSG_RX_STATUS,
			   "calling tpa_stop on queue %d\n",
			   queue);

			frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
				    tpa_info->len_on_bd;

			if (fp->mode == TPA_MODE_GRO)
				pages = (frag_size + tpa_info->full_page - 1) /
					 tpa_info->full_page;
			else
				pages = SGE_PAGE_ALIGN(frag_size) >>
					SGE_PAGE_SHIFT;

			bnx2x_tpa_stop(bp, fp, tpa_info, pages,
				       &cqe->end_agg_cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
			if (bp->panic)
				return 0;
#endif

			bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
			goto next_cqe;
		}
		/* non TPA */
		len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
		pad = cqe_fp->placement_offset;
		dma_sync_single_for_cpu(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
					pad + RX_COPY_THRESH,
					DMA_FROM_DEVICE);
		pad += NET_SKB_PAD;
		prefetch(data + pad); /* speedup eth_type_trans() */
		/* is this an error packet? */
		if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
			DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
			   "ERROR  flags %x  rx packet %u\n",
			   cqe_fp_flags, sw_comp_cons);
			bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring
		 * copy small packets if mtu > 1500
		 */
		if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
		    (len <= RX_COPY_THRESH)) {
			skb = netdev_alloc_skb_ip_align(bp->dev, len);
			if (skb == NULL) {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR  packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
				goto reuse_rx;
			}
			memcpy(skb->data, data + pad, len);
			bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
		} else {
			if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
						       GFP_ATOMIC) == 0)) {
				dma_unmap_single(&bp->pdev->dev,
						 dma_unmap_addr(rx_buf, mapping),
						 fp->rx_buf_size,
						 DMA_FROM_DEVICE);
				skb = build_skb(data, fp->rx_frag_size);
				if (unlikely(!skb)) {
					bnx2x_frag_free(fp, data);
					bnx2x_fp_qstats(bp, fp)->
							rx_skb_alloc_failed++;
					goto next_rx;
				}
				skb_reserve(skb, pad);
			} else {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR  packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
				goto next_rx;
			}
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Set Toeplitz hash for a non-LRO skb */
		rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
		skb_set_hash(skb, rxhash, rxhash_type);

		skb_checksum_none_assert(skb);

		if (bp->dev->features & NETIF_F_RXCSUM)
			bnx2x_csum_validate(skb, cqe, fp,
					    bnx2x_fp_qstats(bp, fp));

		skb_record_rx_queue(skb, fp->rx_queue);

		if (le16_to_cpu(cqe_fp->pars_flags.flags) &
		    PARSING_FLAGS_VLAN)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       le16_to_cpu(cqe_fp->vlan_tag));

		skb_mark_napi_id(skb, &fp->napi);

		if (bnx2x_fp_ll_polling(fp))
			netif_receive_skb(skb);
		else
			napi_gro_receive(&fp->napi, skb);
next_rx:
		rx_buf->data = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		/* mark CQE as free */
		BNX2X_SEED_CQE(cqe_fp);

		if (rx_pkt == budget)
			break;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp = &cqe->fast_path_cqe;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	u8 cos;

	DP(NETIF_MSG_INTR,
	   "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
	   fp->index, fp->fw_sb_id, fp->igu_sb_id);

	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	for_each_cos_in_tx_queue(fp, cos)
		prefetch(fp->txdata_ptr[cos]->tx_cons_sb);

	prefetch(&fp->sb_running_index[SM_RX_ID]);
	napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}
/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}
/* calculates MF speed according to current linespeed and MF configuration */
u16 bnx2x_get_mf_speed(struct bnx2x *bp)
{
	u16 line_speed = bp->link_vars.line_speed;
	if (IS_MF(bp)) {
		u16 maxCfg = bnx2x_extract_max_cfg(bp,
						   bp->mf_config[BP_VN(bp)]);

		/* Calculate the current MAX line speed limit for the MF
		 * devices
		 */
		if (IS_MF_SI(bp))
			line_speed = (line_speed * maxCfg) / 100;
		else { /* SD mode */
			u16 vn_max_rate = maxCfg * 100;

			if (vn_max_rate < line_speed)
				line_speed = vn_max_rate;
		}
	}

	return line_speed;
}
/**
 * bnx2x_fill_report_data - fill link report data to report
 *
 * @bp:		driver handle
 * @data:	link state to update
 *
 * It uses non-atomic bit operations because it is called under the mutex.
 */
static void bnx2x_fill_report_data(struct bnx2x *bp,
				   struct bnx2x_link_report_data *data)
{
	memset(data, 0, sizeof(*data));

	if (IS_PF(bp)) {
		/* Fill the report data: effective line speed */
		data->line_speed = bnx2x_get_mf_speed(bp);

		/* Link is down */
		if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
			__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
				  &data->link_report_flags);

		if (!BNX2X_NUM_ETH_QUEUES(bp))
			__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
				  &data->link_report_flags);

		/* Full DUPLEX */
		if (bp->link_vars.duplex == DUPLEX_FULL)
			__set_bit(BNX2X_LINK_REPORT_FD,
				  &data->link_report_flags);

		/* Rx Flow Control is ON */
		if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
			__set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
				  &data->link_report_flags);

		/* Tx Flow Control is ON */
		if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
			__set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
				  &data->link_report_flags);
	} else { /* VF */
		*data = bp->vf_link_vars;
	}
}
/**
 * bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Calls the __bnx2x_link_report() under the same locking scheme
 * as a link/PHY state managing code to ensure a consistent link
 * reporting.
 */
void bnx2x_link_report(struct bnx2x *bp)
{
	bnx2x_acquire_phy_lock(bp);
	__bnx2x_link_report(bp);
	bnx2x_release_phy_lock(bp);
}
/**
 * __bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Non-atomic implementation.
 * Should be called under the phy_lock.
 */
void __bnx2x_link_report(struct bnx2x *bp)
{
	struct bnx2x_link_report_data cur_data;

	/* reread mf_cfg */
	if (IS_PF(bp) && !CHIP_IS_E1(bp))
		bnx2x_read_mf_cfg(bp);

	/* Read the current link report info */
	bnx2x_fill_report_data(bp, &cur_data);

	/* Don't report link down or exactly the same link status twice */
	if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
	    (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &bp->last_reported_link.link_report_flags) &&
	     test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &cur_data.link_report_flags)))
		return;

	bp->link_cnt++;

	/* We are going to report a new link parameters now -
	 * remember the current data for the next time.
	 */
	memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));

	/* propagate status to VFs */
	if (IS_PF(bp))
		bnx2x_iov_link_update(bp);

	if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		     &cur_data.link_report_flags)) {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
		return;
	} else {
		const char *duplex;
		const char *flow;

		netif_carrier_on(bp->dev);

		if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
				       &cur_data.link_report_flags))
			duplex = "full";
		else
			duplex = "half";

		/* Handle the FC at the end so that only these flags would be
		 * possibly set. This way we may easily check if there is no FC
		 * enabled.
		 */
		if (cur_data.link_report_flags) {
			if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
				     &cur_data.link_report_flags)) {
				if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
					     &cur_data.link_report_flags))
					flow = "ON - receive & transmit";
				else
					flow = "ON - receive";
			} else {
				flow = "ON - transmit";
			}
		} else {
			flow = "none";
		}
		netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
			    cur_data.line_speed, duplex, flow);
	}
}
static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		struct eth_rx_sge *sge;

		sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
		sge->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_sge_mapping +
			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));

		sge->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_sge_mapping +
			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
	}
}
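/* Illustrative "next page" wiring (ring geometry assumed): with
 * RX_SGE_CNT entries per page, the loop above programs the entry at
 * RX_SGE_CNT * i - 2 - the first of the last two entries reserved in
 * each page - with the DMA address of the following page, and the
 * (i % NUM_RX_SGE_PAGES) term wraps the final page back to page 0, so
 * the SGE pages form a circular chain.
 */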
static void bnx2x_free_tpa_pool(struct bnx2x *bp,
				struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
		struct sw_rx_bd *first_buf = &tpa_info->first_buf;
		u8 *data = first_buf->data;

		if (data == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}
		if (tpa_info->tpa_state == BNX2X_TPA_START)
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(first_buf, mapping),
					 fp->rx_buf_size, DMA_FROM_DEVICE);
		bnx2x_frag_free(fp, data);
		first_buf->data = NULL;
	}
}
void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
{
	int j;

	for_each_rx_queue_cnic(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;

		/* Activate BD ring */
		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);
	}
}
void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u16 ring_prod;
	int i, j;

	/* Allocate TPA resources */
	for_each_eth_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		DP(NETIF_MSG_IFUP,
		   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);

		if (!fp->disable_tpa) {
			/* Fill the per-aggregation pool */
			for (i = 0; i < MAX_AGG_QS(bp); i++) {
				struct bnx2x_agg_info *tpa_info =
					&fp->tpa_info[i];
				struct sw_rx_bd *first_buf =
					&tpa_info->first_buf;

				first_buf->data =
					bnx2x_frag_alloc(fp, GFP_KERNEL);
				if (!first_buf->data) {
					BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
						  j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				dma_unmap_addr_set(first_buf, mapping, 0);
				tpa_info->tpa_state = BNX2X_TPA_STOP;
			}

			/* "next page" elements initialization */
			bnx2x_set_next_page_sgl(fp);

			/* set SGEs bit mask */
			bnx2x_init_sge_ring_bit_mask(fp);

			/* Allocate SGEs and initialize the ring elements */
			for (i = 0, ring_prod = 0;
			     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

				if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
						       GFP_KERNEL) < 0) {
					BNX2X_ERR("was only able to allocate %d rx sges\n",
						  i);
					BNX2X_ERR("disabling TPA for queue[%d]\n",
						  j);
					/* Cleanup already allocated elements */
					bnx2x_free_rx_sge_range(bp, fp,
								ring_prod);
					bnx2x_free_tpa_pool(bp, fp,
							    MAX_AGG_QS(bp));
					fp->disable_tpa = 1;
					ring_prod = 0;
					break;
				}
				ring_prod = NEXT_SGE_IDX(ring_prod);
			}

			fp->rx_sge_prod = ring_prod;
		}
	}

	for_each_eth_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;

		/* Activate BD ring */
		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);

		if (j != 0)
			continue;

		if (CHIP_IS_E1(bp)) {
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
			       U64_LO(fp->rx_comp_mapping));
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
			       U64_HI(fp->rx_comp_mapping));
		}
	}
}
static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
{
	u8 cos;
	struct bnx2x *bp = fp->bp;

	for_each_cos_in_tx_queue(fp, cos) {
		struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
		unsigned pkts_compl = 0, bytes_compl = 0;

		u16 sw_prod = txdata->tx_pkt_prod;
		u16 sw_cons = txdata->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
					  &pkts_compl, &bytes_compl);
			sw_cons++;
		}

		netdev_tx_reset_queue(
			netdev_get_tx_queue(bp->dev,
					    txdata->txq_index));
	}
}
static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
{
	int i;

	for_each_tx_queue_cnic(bp, i) {
		bnx2x_free_tx_skbs_queue(&bp->fp[i]);
	}
}

static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_eth_queue(bp, i) {
		bnx2x_free_tx_skbs_queue(&bp->fp[i]);
	}
}

static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	int i;

	/* ring wasn't allocated */
	if (fp->rx_buf_ring == NULL)
		return;

	for (i = 0; i < NUM_RX_BD; i++) {
		struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
		u8 *data = rx_buf->data;

		if (data == NULL)
			continue;
		dma_unmap_single(&bp->pdev->dev,
				 dma_unmap_addr(rx_buf, mapping),
				 fp->rx_buf_size, DMA_FROM_DEVICE);

		rx_buf->data = NULL;
		bnx2x_frag_free(fp, data);
	}
}

static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
{
	int j;

	for_each_rx_queue_cnic(bp, j) {
		bnx2x_free_rx_bds(&bp->fp[j]);
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int j;

	for_each_eth_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		bnx2x_free_rx_bds(fp);

		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
	}
}

static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs_cnic(bp);
	bnx2x_free_rx_skbs_cnic(bp);
}

void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}
void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
{
	/* load old values */
	u32 mf_cfg = bp->mf_config[BP_VN(bp)];

	if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
		/* leave all but MAX value */
		mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;

		/* set new MAX value */
		mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
				& FUNC_MF_CFG_MAX_BW_MASK;

		bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
	}
}
/**
 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
 *
 * @bp:		driver handle
 * @nvecs:	number of vectors to be released
 */
static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
{
	int i, offset = 0;

	if (nvecs == offset)
		return;

	/* VFs don't have a default SB */
	if (IS_PF(bp)) {
		free_irq(bp->msix_table[offset].vector, bp->dev);
		DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
		   bp->msix_table[offset].vector);
		offset++;
	}

	if (CNIC_SUPPORT(bp)) {
		if (nvecs == offset)
			return;
		offset++;
	}

	for_each_eth_queue(bp, i) {
		if (nvecs == offset)
			return;
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
		   i, bp->msix_table[offset].vector);

		free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
	}
}
void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG &&
	    !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
		int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);

		/* vfs don't have a default status block */
		if (IS_PF(bp))
			nvecs++;

		bnx2x_free_msix_irqs(bp, nvecs);
	} else {
		free_irq(bp->dev->irq, bp->dev);
	}
}
int bnx2x_enable_msix(struct bnx2x *bp)
{
	int msix_vec = 0, i, rc;

	/* VFs don't have a default status block */
	if (IS_PF(bp)) {
		bp->msix_table[msix_vec].entry = msix_vec;
		BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
			       bp->msix_table[0].entry);
		msix_vec++;
	}

	/* Cnic requires an msix vector for itself */
	if (CNIC_SUPPORT(bp)) {
		bp->msix_table[msix_vec].entry = msix_vec;
		BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
			       msix_vec, bp->msix_table[msix_vec].entry);
		msix_vec++;
	}

	/* We need separate vectors for ETH queues only (not FCoE) */
	for_each_eth_queue(bp, i) {
		bp->msix_table[msix_vec].entry = msix_vec;
		BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
			       msix_vec, msix_vec, i);
		msix_vec++;
	}

	DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
	   msix_vec);

	rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
				   BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
	/*
	 * reconfigure number of tx/rx queues according to available
	 * MSI-X vectors
	 */
	if (rc == -ENOSPC) {
		/* Get by with single vector */
		rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
		if (rc < 0) {
			BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
				       rc);
			goto no_msix;
		}

		BNX2X_DEV_INFO("Using single MSI-X vector\n");
		bp->flags |= USING_SINGLE_MSIX_FLAG;

		BNX2X_DEV_INFO("set number of queues to 1\n");
		bp->num_ethernet_queues = 1;
		bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
	} else if (rc < 0) {
		BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
		goto no_msix;
	} else if (rc < msix_vec) {
		/* how many fewer vectors did we get? */
		int diff = msix_vec - rc;

		BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);

		/*
		 * decrease number of queues by number of unallocated entries
		 */
		bp->num_ethernet_queues -= diff;
		bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;

		BNX2X_DEV_INFO("New queue configuration set: %d\n",
			       bp->num_queues);
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;

no_msix:
	/* fall to INTx if not enough memory */
	if (rc == -ENOMEM)
		bp->flags |= DISABLE_MSI_FLAG;

	return rc;
}
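/* Illustrative vector layout for a PF with CNIC support and 4 ETH queues
 * (indices follow from the assignment order above, counts assumed):
 *
 *	msix_table[0]		- slowpath (default SB)
 *	msix_table[1]		- CNIC
 *	msix_table[2..5]	- ETH fastpath queues 0..3
 *
 * so msix_vec = 6 vectors are requested; a VF uses the same layout
 * without the leading slowpath entry.
 */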
static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 0;

	/* no default status block for vf */
	if (IS_PF(bp)) {
		rc = request_irq(bp->msix_table[offset++].vector,
				 bnx2x_msix_sp_int, 0,
				 bp->dev->name, bp->dev);
		if (rc) {
			BNX2X_ERR("request sp irq failed\n");
			return -EBUSY;
		}
	}

	if (CNIC_SUPPORT(bp))
		offset++;

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
			 bp->dev->name, i);

		rc = request_irq(bp->msix_table[offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq (%d) failed  rc %d\n", i,
			      bp->msix_table[offset].vector, rc);
			bnx2x_free_msix_irqs(bp, offset);
			return -EBUSY;
		}

		offset++;
	}

	i = BNX2X_NUM_ETH_QUEUES(bp);
	if (IS_PF(bp)) {
		offset = 1 + CNIC_SUPPORT(bp);
		netdev_info(bp->dev,
			    "using MSI-X  IRQs: sp %d  fp[%d] %d ... fp[%d] %d\n",
			    bp->msix_table[0].vector,
			    0, bp->msix_table[offset].vector,
			    i - 1, bp->msix_table[offset + i - 1].vector);
	} else {
		offset = CNIC_SUPPORT(bp);
		netdev_info(bp->dev,
			    "using MSI-X  IRQs: fp[%d] %d ... fp[%d] %d\n",
			    0, bp->msix_table[offset].vector,
			    i - 1, bp->msix_table[offset + i - 1].vector);
	}

	return 0;
}
int bnx2x_enable_msi(struct bnx2x *bp)
{
	int rc;

	rc = pci_enable_msi(bp->pdev);
	if (rc) {
		BNX2X_DEV_INFO("MSI is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSI_FLAG;

	return 0;
}
static int bnx2x_req_irq(struct bnx2x *bp)
{
	unsigned long flags;
	unsigned int irq;

	if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
		flags = 0;
	else
		flags = IRQF_SHARED;

	if (bp->flags & USING_MSIX_FLAG)
		irq = bp->msix_table[0].vector;
	else
		irq = bp->pdev->irq;

	return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
}
static int bnx2x_setup_irqs(struct bnx2x *bp)
{
	int rc = 0;
	if (bp->flags & USING_MSIX_FLAG &&
	    !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc)
			return rc;
	} else {
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
			return rc;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			netdev_info(bp->dev, "using MSI IRQ %d\n",
				    bp->dev->irq);
		}
		if (bp->flags & USING_MSIX_FLAG) {
			bp->dev->irq = bp->msix_table[0].vector;
			netdev_info(bp->dev, "using MSIX IRQ %d\n",
				    bp->dev->irq);
		}
	}

	return 0;
}
static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue_cnic(bp, i) {
		bnx2x_fp_init_lock(&bp->fp[i]);
		napi_enable(&bnx2x_fp(bp, i, napi));
	}
}

static void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_eth_queue(bp, i) {
		bnx2x_fp_init_lock(&bp->fp[i]);
		napi_enable(&bnx2x_fp(bp, i, napi));
	}
}

static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue_cnic(bp, i) {
		napi_disable(&bnx2x_fp(bp, i, napi));
		while (!bnx2x_fp_ll_disable(&bp->fp[i]))
			usleep_range(1000, 2000);
	}
}

static void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_eth_queue(bp, i) {
		napi_disable(&bnx2x_fp(bp, i, napi));
		while (!bnx2x_fp_ll_disable(&bp->fp[i]))
			usleep_range(1000, 2000);
	}
}
void bnx2x_netif_start(struct bnx2x *bp)
{
	if (netif_running(bp->dev)) {
		bnx2x_napi_enable(bp);
		if (CNIC_LOADED(bp))
			bnx2x_napi_enable_cnic(bp);
		bnx2x_int_enable(bp);
		if (bp->state == BNX2X_STATE_OPEN)
			netif_tx_wake_all_queues(bp->dev);
	}
}

void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	if (CNIC_LOADED(bp))
		bnx2x_napi_disable_cnic(bp);
}
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
		       void *accel_priv, select_queue_fallback_t fallback)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
		struct ethhdr *hdr = (struct ethhdr *)skb->data;
		u16 ether_type = ntohs(hdr->h_proto);

		/* Skip VLAN tag if present */
		if (ether_type == ETH_P_8021Q) {
			struct vlan_ethhdr *vhdr =
				(struct vlan_ethhdr *)skb->data;

			ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
		}

		/* If ethertype is FCoE or FIP - use FCoE ring */
		if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
			return bnx2x_fcoe_tx(bp, txq_index);
	}

	/* select a non-FCoE queue */
	return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
}
void bnx2x_set_num_queues(struct bnx2x *bp)
{
	/* RSS queues */
	bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);

	/* override in STORAGE SD modes */
	if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
		bp->num_ethernet_queues = 1;

	/* Add special queues */
	bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
	bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;

	BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
}
/**
 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
 *
 * @bp:		Driver handle
 *
 * We currently support at most 16 Tx queues for each CoS, thus we will
 * allocate a multiple of 16 for ETH L2 rings according to the value of the
 * bp->max_cos.
 *
 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
 * index after all ETH L2 indices.
 *
 * If the actual number of Tx queues (for each CoS) is less than 16 then there
 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
 * 16..31,...) with indices that are not coupled with any real Tx queue.
 *
 * The proper configuration of skb->queue_mapping is handled by
 * bnx2x_select_queue() and __skb_tx_hash().
 *
 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
 */
static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
{
	int rc, tx, rx;

	tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
	rx = BNX2X_NUM_ETH_QUEUES(bp);

	/* account for fcoe queue */
	if (include_cnic && !NO_FCOE(bp)) {
		rx++;
		tx++;
	}

	rc = netif_set_real_num_tx_queues(bp->dev, tx);
	if (rc) {
		BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
		return rc;
	}
	rc = netif_set_real_num_rx_queues(bp->dev, rx);
	if (rc) {
		BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
		return rc;
	}

	DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
			  tx, rx);

	return rc;
}
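/* Illustrative queue accounting (values assumed): 4 ETH queues with
 * max_cos = 2 and an FCoE L2 queue give
 *
 *	tx = 4 * 2 + 1 = 9,	rx = 4 + 1 = 5,
 *
 * matching the CoS-grouped Tx indexing described in the comment above.
 */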
static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u32 mtu;

		/* Always use a mini-jumbo MTU for the FCoE L2 ring */
		if (IS_FCOE_IDX(i))
			/*
			 * Although there are no IP frames expected to arrive to
			 * this ring we still want to add an
			 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
			 * overrun
			 */
			mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
		else
			mtu = bp->dev->mtu;
		fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
				  IP_HEADER_ALIGNMENT_PADDING +
				  ETH_OVREHEAD +
				  mtu +
				  BNX2X_FW_RX_ALIGN_END;
		/* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
		if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
			fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
		else
			fp->rx_frag_size = 0;
	}
}
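/* Illustrative sizing (macro values assumed): for mtu = 1500 the buffer is
 *
 *	rx_buf_size = BNX2X_FW_RX_ALIGN_START + IP_HEADER_ALIGNMENT_PADDING +
 *		      ETH_OVREHEAD + 1500 + BNX2X_FW_RX_ALIGN_END,
 *
 * and as long as rx_buf_size + NET_SKB_PAD still fits in one page the
 * cheaper page-frag allocator is used (rx_frag_size != 0); jumbo MTUs
 * overflow the page and fall back to kmalloc'ed buffers.
 */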
static int bnx2x_init_rss(struct bnx2x *bp)
{
	int i;
	u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);

	/* Prepare the initial contents for the indirection table if RSS is
	 * enabled
	 */
	for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
		bp->rss_conf_obj.ind_table[i] =
			bp->fp->cl_id +
			ethtool_rxfh_indir_default(i, num_eth_queues);

	/*
	 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
	 * per-port, so if explicit configuration is needed, do it only
	 * for a PMF.
	 *
	 * For 57712 and newer on the other hand it's a per-function
	 * configuration.
	 */
	return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
}
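/* Illustrative indirection table (values assumed): with 4 ETH queues and
 * bp->fp->cl_id = 16, ethtool_rxfh_indir_default(i, 4) returns i % 4, so
 * the table becomes 16, 17, 18, 19, 16, 17, ... - hash buckets spread
 * round-robin over the client IDs of the ETH queues.
 */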
int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
	      bool config_hash, bool enable)
{
	struct bnx2x_config_rss_params params = {NULL};

	/* Although RSS is meaningless when there is a single HW queue we
	 * still need it enabled in order to have HW Rx hash generated.
	 *
	 * if (!is_eth_multi(bp))
	 *      bp->multi_mode = ETH_RSS_MODE_DISABLED;
	 */

	params.rss_obj = rss_obj;

	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);

	if (enable) {
		__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);

		/* RSS configuration */
		__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
		__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
		__set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
		__set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
		if (rss_obj->udp_rss_v4)
			__set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
		if (rss_obj->udp_rss_v6)
			__set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
	} else {
		__set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
	}

	/* Hash bits */
	params.rss_result_mask = MULTI_MASK;

	memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));

	if (config_hash) {
		/* RSS keys */
		prandom_bytes(params.rss_key, T_ETH_RSS_KEY * 4);
		__set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
	}

	if (IS_PF(bp))
		return bnx2x_config_rss(bp, &params);
	else
		return bnx2x_vfpf_config_rss(bp, &params);
}
static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	struct bnx2x_func_state_params func_params = {NULL};

	/* Prepare parameters for function state transitions */
	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_HW_INIT;

	func_params.params.hw_init.load_phase = load_code;

	return bnx2x_func_state_change(bp, &func_params);
}
/*
 * Cleans the objects that have internal lists without sending
 * ramrods. Should be run when interrupts are disabled.
 */
void bnx2x_squeeze_objects(struct bnx2x *bp)
{
	int rc;
	unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
	struct bnx2x_mcast_ramrod_params rparam = {NULL};
	struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;

	/***************** Cleanup MACs' object first *************************/

	/* Wait for completion of requested */
	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
	/* Perform a dry cleanup */
	__set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);

	/* Clean ETH primary MAC */
	__set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
	rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
				 &ramrod_flags);
	if (rc != 0)
		BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);

	/* Cleanup UC list */
	vlan_mac_flags = 0;
	__set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
	rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
				 &ramrod_flags);
	if (rc != 0)
		BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);

	/***************** Now clean mcast object *****************************/
	rparam.mcast_obj = &bp->mcast_obj;
	__set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);

	/* Add a DEL command... - Since we're doing a driver cleanup only,
	 * we take a lock surrounding both the initial send and the CONTs,
	 * as we don't want a true completion to disrupt us in the middle.
	 */
	netif_addr_lock_bh(bp->dev);
	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
	if (rc < 0)
		BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
			  rc);

	/* ...and wait until all pending commands are cleared */
	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
	while (rc != 0) {
		if (rc < 0) {
			BNX2X_ERR("Failed to clean multi-cast object: %d\n",
				  rc);
			netif_addr_unlock_bh(bp->dev);
			return;
		}

		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
	}
	netif_addr_unlock_bh(bp->dev);
}
#ifndef BNX2X_STOP_ON_ERROR
#define LOAD_ERROR_EXIT(bp, label) \
	do { \
		(bp)->state = BNX2X_STATE_ERROR; \
		goto label; \
	} while (0)

#define LOAD_ERROR_EXIT_CNIC(bp, label) \
	do { \
		bp->cnic_loaded = false; \
		goto label; \
	} while (0)
#else /*BNX2X_STOP_ON_ERROR*/
#define LOAD_ERROR_EXIT(bp, label) \
	do { \
		(bp)->state = BNX2X_STATE_ERROR; \
		(bp)->panic = 1; \
		return -EBUSY; \
	} while (0)
#define LOAD_ERROR_EXIT_CNIC(bp, label) \
	do { \
		bp->cnic_loaded = false; \
		(bp)->panic = 1; \
		return -EBUSY; \
	} while (0)
#endif /*BNX2X_STOP_ON_ERROR*/
static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
{
	BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
		       bp->fw_stats_data_sz + bp->fw_stats_req_sz);
}
static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
{
	int num_groups, vf_headroom = 0;
	int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;

	/* number of queues for statistics is number of eth queues + FCoE */
	u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;

	/* Total number of FW statistics requests =
	 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
	 * and fcoe l2 queue) stats + num of queues (which includes another 1
	 * for fcoe l2 queue if applicable)
	 */
	bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;

	/* vf stats appear in the request list, but their data is allocated by
	 * the VFs themselves. We don't include them in the bp->fw_stats_num as
	 * it is used to determine where to place the vf stats queries in the
	 * request struct
	 */
	if (IS_SRIOV(bp))
		vf_headroom = bnx2x_vf_headroom(bp);

	/* Request is built from stats_query_header and an array of
	 * stats_query_cmd_group each of which contains
	 * STATS_QUERY_CMD_COUNT rules. The real number or requests is
	 * configured in the stats_query_header.
	 */
	num_groups =
		(((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
		 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
		 1 : 0));

	DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
	   bp->fw_stats_num, vf_headroom, num_groups);
	bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
		num_groups * sizeof(struct stats_query_cmd_group);

	/* Data for statistics requests + stats_counter
	 * stats_counter holds per-STORM counters that are incremented
	 * when STORM has finished with the current request.
	 * memory for FCoE offloaded statistics are counted anyway,
	 * even if they will not be sent.
	 * VF stats are not accounted for here as the data of VF stats is stored
	 * in memory allocated by the VF, not here.
	 */
	bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
		sizeof(struct per_pf_stats) +
		sizeof(struct fcoe_statistics_params) +
		sizeof(struct per_queue_stats) * num_queue_stats +
		sizeof(struct stats_counter);

	bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping,
				       bp->fw_stats_data_sz + bp->fw_stats_req_sz);
	if (!bp->fw_stats)
		goto alloc_mem_err;

	/* Set shortcuts */
	bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
	bp->fw_stats_req_mapping = bp->fw_stats_mapping;
	bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
		((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
	bp->fw_stats_data_mapping = bp->fw_stats_mapping +
		bp->fw_stats_req_sz;

	DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
	   U64_HI(bp->fw_stats_req_mapping),
	   U64_LO(bp->fw_stats_req_mapping));
	DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
	   U64_HI(bp->fw_stats_data_mapping),
	   U64_LO(bp->fw_stats_data_mapping));
	return 0;

alloc_mem_err:
	bnx2x_free_fw_stats_mem(bp);
	BNX2X_ERR("Can't allocate FW stats memory\n");
	return -ENOMEM;
}
2296 /* send load request to mcp and analyze response */
2297 static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2303 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2304 DRV_MSG_SEQ_NUMBER_MASK);
2305 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2307 /* Get current FW pulse sequence */
2308 bp->fw_drv_pulse_wr_seq =
2309 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2310 DRV_PULSE_SEQ_MASK);
2311 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2313 param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2315 if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2316 param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2319 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
2321 /* if mcp fails to respond we must abort */
2322 if (!(*load_code)) {
2323 BNX2X_ERR("MCP response failure, aborting\n");
2327 /* If mcp refused (e.g. other port is in diagnostic mode) we must abort */
2330 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2331 BNX2X_ERR("MCP refused load request, aborting\n");
2337 /* check whether another PF has already loaded FW to chip. In
2338 * virtualized environments a pf from another VM may have already
2339 initialized the device, including loading its FW. */
2341 int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
2343 /* is another pf loaded on this engine? */
2344 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2345 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2346 /* build my FW version dword */
2347 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2348 (BCM_5710_FW_MINOR_VERSION << 8) +
2349 (BCM_5710_FW_REVISION_VERSION << 16) +
2350 (BCM_5710_FW_ENGINEERING_VERSION << 24);
2352 /* read loaded FW from chip */
2353 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2355 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2358 /* abort nic load if version mismatch */
2359 if (my_fw != loaded_fw) {
2361 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
2364 BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
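/* Worked example with hypothetical version numbers (not taken from this
 * driver): for a firmware 7.8.19.0 the dword built above would be
 *
 *	my_fw = 7 + (8 << 8) + (19 << 16) + (0 << 24) = 0x00130807
 *
 * one byte per component with the major version in the least significant
 * byte, so a single equality test detects any component mismatch.
 */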
2372 /* returns the "mcp load_code" according to global load_count array */
2373 static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2375 int path = BP_PATH(bp);
2377 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
2378 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2379 bnx2x_load_count[path][2]);
2380 bnx2x_load_count[path][0]++;
2381 bnx2x_load_count[path][1 + port]++;
2382 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
2383 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2384 bnx2x_load_count[path][2]);
2385 if (bnx2x_load_count[path][0] == 1)
2386 return FW_MSG_CODE_DRV_LOAD_COMMON;
2387 else if (bnx2x_load_count[path][1 + port] == 1)
2388 return FW_MSG_CODE_DRV_LOAD_PORT;
2390 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
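/* Summary of the counting scheme above (assuming the conventional layout of
 * bnx2x_load_count: slot [0] counts functions on the path, slot [1 + port]
 * counts functions per port):
 *
 *	load_count[path][0] == 1        -> first on the path -> LOAD_COMMON
 *	load_count[path][1 + port] == 1 -> first on the port -> LOAD_PORT
 *	otherwise                       ->                      LOAD_FUNCTION
 *
 * This mimics the answer the MCP would normally give to a LOAD_REQ.
 */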
2393 /* mark PMF if applicable */
2394 static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2396 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2397 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2398 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2400 /* We need the barrier to ensure the ordering between the
2401 * writing to bp->port.pmf here and reading it from the
2402 * bnx2x_periodic_task().
2409 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2412 static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2414 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2415 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2416 (bp->common.shmem2_base)) {
2417 if (SHMEM2_HAS(bp, dcc_support))
2418 SHMEM2_WR(bp, dcc_support,
2419 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2420 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2421 if (SHMEM2_HAS(bp, afex_driver_support))
2422 SHMEM2_WR(bp, afex_driver_support,
2423 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2426 /* Set AFEX default VLAN tag to an invalid value */
2427 bp->afex_def_vlan_tag = -1;
2431 * bnx2x_bz_fp - zero content of the fastpath structure.
2433 * @bp: driver handle
2434 * @index: fastpath index to be zeroed
2436 Makes sure the contents of the bp->fp[index].napi are kept intact.
2439 static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2441 struct bnx2x_fastpath *fp = &bp->fp[index];
2443 struct napi_struct orig_napi = fp->napi;
2444 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
2446 /* bzero bnx2x_fastpath contents */
2448 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2449 sizeof(struct bnx2x_agg_info));
2450 memset(fp, 0, sizeof(*fp));
2452 /* Restore the NAPI object as it has been already initialized */
2453 fp->napi = orig_napi;
2454 fp->tpa_info = orig_tpa_info;
2458 fp->max_cos = bp->max_cos;
2460 /* Special queues support only one CoS */
2463 /* Init txdata pointers */
2465 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
2467 for_each_cos_in_tx_queue(fp, cos)
2468 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2469 BNX2X_NUM_ETH_QUEUES(bp) + index];
2471 /* set the TPA flag for each queue. The TPA flag determines the minimum
2472 * queue size, so it must be set prior to queue memory allocation
2474 fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
2475 (bp->flags & GRO_ENABLE_FLAG &&
2476 bnx2x_mtu_allows_gro(bp->dev->mtu)));
2477 if (bp->flags & TPA_ENABLE_FLAG)
2478 fp->mode = TPA_MODE_LRO;
2479 else if (bp->flags & GRO_ENABLE_FLAG)
2480 fp->mode = TPA_MODE_GRO;
2482 /* We don't want TPA on an FCoE L2 ring */
2484 fp->disable_tpa = 1;
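/* Decision table for the TPA setup above (GRO additionally requires an MTU
 * accepted by bnx2x_mtu_allows_gro(); FCoE L2 rings never use TPA):
 *
 *	TPA flag  GRO flag + MTU ok  ->  disable_tpa  fp->mode
 *	  set           any          ->       0       TPA_MODE_LRO
 *	 clear          yes          ->       0       TPA_MODE_GRO
 *	 clear          no           ->       1       (not used)
 */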
2487 int bnx2x_load_cnic(struct bnx2x *bp)
2489 int i, rc, port = BP_PORT(bp);
2491 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2493 mutex_init(&bp->cnic_mutex);
2496 rc = bnx2x_alloc_mem_cnic(bp);
2498 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2499 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2503 rc = bnx2x_alloc_fp_mem_cnic(bp);
2505 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2506 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2509 /* Update the number of queues with the cnic queues */
2510 rc = bnx2x_set_real_num_queues(bp, 1);
2512 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2513 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2516 /* Add all CNIC NAPI objects */
2517 bnx2x_add_all_napi_cnic(bp);
2518 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2519 bnx2x_napi_enable_cnic(bp);
2521 rc = bnx2x_init_hw_func_cnic(bp);
2523 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2525 bnx2x_nic_init_cnic(bp);
2528 /* Enable Timer scan */
2529 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2531 /* setup cnic queues */
2532 for_each_cnic_queue(bp, i) {
2533 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2535 BNX2X_ERR("Queue setup failed\n");
2536 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2541 /* Initialize Rx filter. */
2542 bnx2x_set_rx_mode_inner(bp);
2544 /* re-read iscsi info */
2545 bnx2x_get_iscsi_info(bp);
2546 bnx2x_setup_cnic_irq_info(bp);
2547 bnx2x_setup_cnic_info(bp);
2548 bp->cnic_loaded = true;
2549 if (bp->state == BNX2X_STATE_OPEN)
2550 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2552 DP(NETIF_MSG_IFUP, "CNIC-related load completed successfully\n");
2556 #ifndef BNX2X_STOP_ON_ERROR
2558 /* Disable Timer scan */
2559 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2562 bnx2x_napi_disable_cnic(bp);
2563 /* Update the number of queues without the cnic queues */
2564 if (bnx2x_set_real_num_queues(bp, 0))
2565 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2567 BNX2X_ERR("CNIC-related load failed\n");
2568 bnx2x_free_fp_mem_cnic(bp);
2569 bnx2x_free_mem_cnic(bp);
2571 #endif /* ! BNX2X_STOP_ON_ERROR */
2574 /* must be called with rtnl_lock */
2575 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2577 int port = BP_PORT(bp);
2578 int i, rc = 0, load_code = 0;
2580 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2582 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2584 #ifdef BNX2X_STOP_ON_ERROR
2585 if (unlikely(bp->panic)) {
2586 BNX2X_ERR("Can't load NIC when there is panic\n");
2591 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2593 /* zero the structure w/o any lock, before SP handler is initialized */
2594 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2595 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2596 &bp->last_reported_link.link_report_flags);
2599 /* must be called before memory allocation and HW init */
2600 bnx2x_ilt_set_info(bp);
2603 * Zero the fastpath structures while preserving invariants that are
2604 * allocated only once: the napi struct, fp index, max_cos and bp pointer.
2605 * Also set fp->disable_tpa and txdata_ptr.
2607 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
2608 for_each_queue(bp, i)
2610 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2611 bp->num_cnic_queues) *
2612 sizeof(struct bnx2x_fp_txdata));
2614 bp->fcoe_init = false;
2616 /* Set the receive queues buffer size */
2617 bnx2x_set_rx_buf_size(bp);
2620 rc = bnx2x_alloc_mem(bp);
2622 BNX2X_ERR("Unable to allocate bp memory\n");
2627 /* This needs to be done after alloc_mem, since it self-adjusts to the
2628 * amount of memory available for RSS queues
2630 rc = bnx2x_alloc_fp_mem(bp);
2632 BNX2X_ERR("Unable to allocate memory for fps\n");
2633 LOAD_ERROR_EXIT(bp, load_error0);
2636 /* Allocate memory for FW statistics */
2637 if (bnx2x_alloc_fw_stats_mem(bp))
2638 LOAD_ERROR_EXIT(bp, load_error0);
2640 /* request pf to initialize status blocks */
2642 rc = bnx2x_vfpf_init(bp);
2644 LOAD_ERROR_EXIT(bp, load_error0);
2647 /* Since bnx2x_alloc_mem() may update bp->num_queues,
2648 * bnx2x_set_real_num_queues() must always come after it.
2649 * At this stage cnic queues are not counted.
2651 rc = bnx2x_set_real_num_queues(bp, 0);
2653 BNX2X_ERR("Unable to set real_num_queues\n");
2654 LOAD_ERROR_EXIT(bp, load_error0);
2657 /* Configure multi-CoS mappings in the kernel.
2658 * This configuration may be overridden by a multi-class queue
2659 * discipline or by a DCBX negotiation result.
2661 bnx2x_setup_tc(bp->dev, bp->max_cos);
2663 /* Add all NAPI objects */
2664 bnx2x_add_all_napi(bp);
2665 DP(NETIF_MSG_IFUP, "napi added\n");
2666 bnx2x_napi_enable(bp);
2669 /* set pf load just before approaching the MCP */
2670 bnx2x_set_pf_load(bp);
2672 /* if mcp exists send load request and analyze response */
2673 if (!BP_NOMCP(bp)) {
2674 /* attempt to load pf */
2675 rc = bnx2x_nic_load_request(bp, &load_code);
2677 LOAD_ERROR_EXIT(bp, load_error1);
2679 /* what did mcp say? */
2680 rc = bnx2x_compare_fw_ver(bp, load_code, true);
2682 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2683 LOAD_ERROR_EXIT(bp, load_error2);
2686 load_code = bnx2x_nic_load_no_mcp(bp, port);
2689 /* mark pmf if applicable */
2690 bnx2x_nic_load_pmf(bp, load_code);
2692 /* Init Function state controlling object */
2693 bnx2x__init_func_obj(bp);
2696 rc = bnx2x_init_hw(bp, load_code);
2698 BNX2X_ERR("HW init failed, aborting\n");
2699 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2700 LOAD_ERROR_EXIT(bp, load_error2);
2704 bnx2x_pre_irq_nic_init(bp);
2706 /* Connect to IRQs */
2707 rc = bnx2x_setup_irqs(bp);
2709 BNX2X_ERR("setup irqs failed\n");
2711 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2712 LOAD_ERROR_EXIT(bp, load_error2);
2715 /* Init per-function objects */
2717 /* Setup NIC internals and enable interrupts */
2718 bnx2x_post_irq_nic_init(bp, load_code);
2720 bnx2x_init_bp_objs(bp);
2721 bnx2x_iov_nic_init(bp);
2723 /* Set AFEX default VLAN tag to an invalid value */
2724 bp->afex_def_vlan_tag = -1;
2725 bnx2x_nic_load_afex_dcc(bp, load_code);
2726 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2727 rc = bnx2x_func_start(bp);
2729 BNX2X_ERR("Function start failed!\n");
2730 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2732 LOAD_ERROR_EXIT(bp, load_error3);
2735 /* Send LOAD_DONE command to MCP */
2736 if (!BP_NOMCP(bp)) {
2737 load_code = bnx2x_fw_command(bp,
2738 DRV_MSG_CODE_LOAD_DONE, 0);
2740 BNX2X_ERR("MCP response failure, aborting\n");
2742 LOAD_ERROR_EXIT(bp, load_error3);
2746 /* initialize FW coalescing state machines in RAM */
2747 bnx2x_update_coalesce(bp);
2750 /* setup the leading queue */
2751 rc = bnx2x_setup_leading(bp);
2753 BNX2X_ERR("Setup leading failed!\n");
2754 LOAD_ERROR_EXIT(bp, load_error3);
2757 /* set up the rest of the queues */
2758 for_each_nondefault_eth_queue(bp, i) {
2760 rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
2762 rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
2764 BNX2X_ERR("Queue %d setup failed\n", i);
2765 LOAD_ERROR_EXIT(bp, load_error3);
2770 rc = bnx2x_init_rss(bp);
2772 BNX2X_ERR("PF RSS init failed\n");
2773 LOAD_ERROR_EXIT(bp, load_error3);
2776 /* Now when Clients are configured we are ready to work */
2777 bp->state = BNX2X_STATE_OPEN;
2779 /* Configure a ucast MAC */
2781 rc = bnx2x_set_eth_mac(bp, true);
2783 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2786 BNX2X_ERR("Setting Ethernet MAC failed\n");
2787 LOAD_ERROR_EXIT(bp, load_error3);
2790 if (IS_PF(bp) && bp->pending_max) {
2791 bnx2x_update_max_mf_config(bp, bp->pending_max);
2792 bp->pending_max = 0;
2796 rc = bnx2x_initial_phy_init(bp, load_mode);
2798 LOAD_ERROR_EXIT(bp, load_error3);
2800 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
2802 /* Start fast path */
2804 /* Initialize Rx filter. */
2805 bnx2x_set_rx_mode_inner(bp);
2808 switch (load_mode) {
2810 /* Tx queues should only be re-enabled */
2811 netif_tx_wake_all_queues(bp->dev);
2815 netif_tx_start_all_queues(bp->dev);
2816 smp_mb__after_atomic();
2820 case LOAD_LOOPBACK_EXT:
2821 bp->state = BNX2X_STATE_DIAG;
2829 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
2831 bnx2x__link_status_update(bp);
2833 /* start the timer */
2834 mod_timer(&bp->timer, jiffies + bp->current_interval);
2836 if (CNIC_ENABLED(bp))
2837 bnx2x_load_cnic(bp);
2840 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
2842 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2843 /* mark driver is loaded in shmem2 */
2845 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2846 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2847 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2848 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2851 /* Wait for all pending SP commands to complete */
2852 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2853 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2854 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
2858 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2859 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2860 bnx2x_dcbx_init(bp, false);
2862 DP(NETIF_MSG_IFUP, "NIC load completed successfully\n");
2866 #ifndef BNX2X_STOP_ON_ERROR
2869 bnx2x_int_disable_sync(bp, 1);
2871 /* Clean queueable objects */
2872 bnx2x_squeeze_objects(bp);
2875 /* Free SKBs, SGEs, TPA pool and driver internals */
2876 bnx2x_free_skbs(bp);
2877 for_each_rx_queue(bp, i)
2878 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2883 if (IS_PF(bp) && !BP_NOMCP(bp)) {
2884 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2885 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2890 bnx2x_napi_disable(bp);
2891 bnx2x_del_all_napi(bp);
2893 /* clear pf_load status, as it was already set */
2895 bnx2x_clear_pf_load(bp);
2897 bnx2x_free_fw_stats_mem(bp);
2898 bnx2x_free_fp_mem(bp);
2902 #endif /* ! BNX2X_STOP_ON_ERROR */
2905 int bnx2x_drain_tx_queues(struct bnx2x *bp)
2909 /* Wait until tx fastpath tasks complete */
2910 for_each_tx_queue(bp, i) {
2911 struct bnx2x_fastpath *fp = &bp->fp[i];
2913 for_each_cos_in_tx_queue(fp, cos)
2914 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2921 /* must be called with rtnl_lock */
2922 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2925 bool global = false;
2927 DP(NETIF_MSG_IFDOWN, "Starting NIC unload\n");
2929 /* mark driver is unloaded in shmem2 */
2930 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2932 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2933 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2934 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2937 if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
2938 (bp->state == BNX2X_STATE_CLOSED ||
2939 bp->state == BNX2X_STATE_ERROR)) {
2940 /* We can get here if the driver has been unloaded
2941 * during parity error recovery and is either waiting for a
2942 * leader to complete or for other functions to unload and
2943 * then ifdown has been issued. In this case we want to
2944 * unload and let other functions complete the recovery process. */
2947 bp->recovery_state = BNX2X_RECOVERY_DONE;
2949 bnx2x_release_leader_lock(bp);
2952 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2953 BNX2X_ERR("Can't unload in closed or error state\n");
2957 /* Nothing to do during unload if the previous bnx2x_nic_load()
2958 * has not completed successfully - all resources are released.
2960 * We can get here only after an unsuccessful ndo_* callback, during which
2961 * the dev->IFF_UP flag is still on.
2963 if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
2966 /* It's important to set the bp->state to the value different from
2967 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2968 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2970 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2973 /* indicate to VFs that the PF is going down */
2974 bnx2x_iov_channel_down(bp);
2976 if (CNIC_LOADED(bp))
2977 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2980 bnx2x_tx_disable(bp);
2981 netdev_reset_tc(bp->dev);
2983 bp->rx_mode = BNX2X_RX_MODE_NONE;
2985 del_timer_sync(&bp->timer);
2988 /* Set ALWAYS_ALIVE bit in shmem */
2989 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2990 bnx2x_drv_pulse(bp);
2991 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2992 bnx2x_save_statistics(bp);
2995 /* wait till consumers catch up with producers in all queues */
2996 bnx2x_drain_tx_queues(bp);
2998 /* if VF, indicate to the PF that this function is going down (the PF
2999 * will delete sp elements and clear initializations)
3002 bnx2x_vfpf_close_vf(bp);
3003 else if (unload_mode != UNLOAD_RECOVERY)
3004 /* if this is a normal/close unload need to clean up chip*/
3005 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
3007 /* Send the UNLOAD_REQUEST to the MCP */
3008 bnx2x_send_unload_req(bp, unload_mode);
3010 /* Prevent transactions to host from the functions on the
3011 * engine that doesn't reset global blocks in case of global
3012 * attention once global blocks are reset and gates are opened
3013 * (the engine whose leader will perform the recovery last).
3016 if (!CHIP_IS_E1x(bp))
3017 bnx2x_pf_disable(bp);
3019 /* Disable HW interrupts, NAPI */
3020 bnx2x_netif_stop(bp, 1);
3021 /* Delete all NAPI objects */
3022 bnx2x_del_all_napi(bp);
3023 if (CNIC_LOADED(bp))
3024 bnx2x_del_all_napi_cnic(bp);
3028 /* Report UNLOAD_DONE to MCP */
3029 bnx2x_send_unload_done(bp, false);
3033 * At this stage no more interrupts will arrive so we may safely clean
3034 * the queueable objects here in case they failed to get cleaned so far.
3037 bnx2x_squeeze_objects(bp);
3039 /* There should be no more pending SP commands at this stage */
3044 /* clear pending work in rtnl task */
3045 bp->sp_rtnl_state = 0;
3048 /* Free SKBs, SGEs, TPA pool and driver internals */
3049 bnx2x_free_skbs(bp);
3050 if (CNIC_LOADED(bp))
3051 bnx2x_free_skbs_cnic(bp);
3052 for_each_rx_queue(bp, i)
3053 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
3055 bnx2x_free_fp_mem(bp);
3056 if (CNIC_LOADED(bp))
3057 bnx2x_free_fp_mem_cnic(bp);
3060 if (CNIC_LOADED(bp))
3061 bnx2x_free_mem_cnic(bp);
3065 bp->state = BNX2X_STATE_CLOSED;
3066 bp->cnic_loaded = false;
3068 /* Clear driver version indication in shmem */
3070 bnx2x_update_mng_version(bp);
3072 /* Check if there are pending parity attentions. If there are - set
3073 * RECOVERY_IN_PROGRESS.
3075 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
3076 bnx2x_set_reset_in_progress(bp);
3078 /* Set RESET_IS_GLOBAL if needed */
3080 bnx2x_set_reset_global(bp);
3083 /* The last driver must disable a "close the gate" if there is no
3084 * parity attention or "process kill" pending.
3087 !bnx2x_clear_pf_load(bp) &&
3088 bnx2x_reset_is_done(bp, BP_PATH(bp)))
3089 bnx2x_disable_close_the_gate(bp);
3091 DP(NETIF_MSG_IFDOWN, "Ending NIC unload\n");
3096 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
3100 /* If there is no power capability, silently succeed */
3101 if (!bp->pdev->pm_cap) {
3102 BNX2X_DEV_INFO("No power capability. Breaking.\n");
3106 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
3110 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3111 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3112 PCI_PM_CTRL_PME_STATUS));
3114 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3115 /* delay required during transition out of D3hot */
3120 /* If there are other clients above, don't
3121 shut down the power */
3122 if (atomic_read(&bp->pdev->enable_cnt) != 1)
3124 /* Don't shut down the power for emulation and FPGA */
3125 if (CHIP_REV_IS_SLOW(bp))
3128 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3132 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3134 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3137 /* No more memory access after this point until
3138 * device is brought back to D0.
3143 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
3150 * net_device service functions
3152 static int bnx2x_poll(struct napi_struct *napi, int budget)
3156 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3158 struct bnx2x *bp = fp->bp;
3161 #ifdef BNX2X_STOP_ON_ERROR
3162 if (unlikely(bp->panic)) {
3163 napi_complete(napi);
3167 if (!bnx2x_fp_lock_napi(fp))
3170 for_each_cos_in_tx_queue(fp, cos)
3171 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3172 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
3174 if (bnx2x_has_rx_work(fp)) {
3175 work_done += bnx2x_rx_int(fp, budget - work_done);
3177 /* must not complete if we consumed full budget */
3178 if (work_done >= budget) {
3179 bnx2x_fp_unlock_napi(fp);
3184 /* Fall out from the NAPI loop if needed */
3185 if (!bnx2x_fp_unlock_napi(fp) &&
3186 !(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3188 /* No need to update SB for FCoE L2 ring as long as
3189 * it's connected to the default SB and the SB
3190 * has been updated when NAPI was scheduled.
3192 if (IS_FCOE_FP(fp)) {
3193 napi_complete(napi);
3196 bnx2x_update_fpsb_idx(fp);
3197 /* bnx2x_has_rx_work() reads the status block,
3198 * thus we need to ensure that status block indices
3199 * have been actually read (bnx2x_update_fpsb_idx)
3200 * prior to this check (bnx2x_has_rx_work) so that
3201 * we won't write the "newer" value of the status block
3202 * to IGU (if there was a DMA right after
3203 * bnx2x_has_rx_work and if there is no rmb, the memory
3204 * reading (bnx2x_update_fpsb_idx) may be postponed
3205 * to right before bnx2x_ack_sb). In this case there
3206 * will never be another interrupt until there is
3207 * another update of the status block, while there
3208 * is still unhandled work.
3212 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3213 napi_complete(napi);
3214 /* Re-enable interrupts */
3215 DP(NETIF_MSG_RX_STATUS,
3216 "Update index to %d\n", fp->fp_hc_idx);
3217 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3218 le16_to_cpu(fp->fp_hc_idx),
3228 #ifdef CONFIG_NET_RX_BUSY_POLL
3229 /* must be called with local_bh_disable()d */
3230 int bnx2x_low_latency_recv(struct napi_struct *napi)
3232 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3234 struct bnx2x *bp = fp->bp;
3237 if ((bp->state == BNX2X_STATE_CLOSED) ||
3238 (bp->state == BNX2X_STATE_ERROR) ||
3239 (bp->flags & (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG)))
3240 return LL_FLUSH_FAILED;
3242 if (!bnx2x_fp_lock_poll(fp))
3243 return LL_FLUSH_BUSY;
3245 if (bnx2x_has_rx_work(fp))
3246 found = bnx2x_rx_int(fp, 4);
3248 bnx2x_fp_unlock_poll(fp);
3254 /* we split the first BD into header and data BDs
3255 * to ease the pain of our fellow microcode engineers;
3256 * we use one mapping for both BDs
3258 static u16 bnx2x_tx_split(struct bnx2x *bp,
3259 struct bnx2x_fp_txdata *txdata,
3260 struct sw_tx_bd *tx_buf,
3261 struct eth_tx_start_bd **tx_bd, u16 hlen,
3264 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3265 struct eth_tx_bd *d_tx_bd;
3267 int old_len = le16_to_cpu(h_tx_bd->nbytes);
3269 /* first fix first BD */
3270 h_tx_bd->nbytes = cpu_to_le16(hlen);
3272 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3273 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
3275 /* now get a new data BD
3276 * (after the pbd) and fill it */
3277 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3278 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3280 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3281 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3283 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3284 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3285 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3287 /* this marks the BD as one that has no individual mapping */
3288 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3290 DP(NETIF_MSG_TX_QUEUED,
3291 "TSO split data size is %d (%x:%x)\n",
3292 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3295 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
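/* Note on the split above: it reuses the single DMA mapping of the original
 * BD. If that BD covered [addr, addr + old_len), the header BD now covers
 * [addr, addr + hlen) and the new data BD covers [addr + hlen,
 * addr + old_len); nothing is remapped, and BNX2X_TSO_SPLIT_BD tells the
 * completion path that the data BD has no mapping of its own to unmap.
 */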
3300 #define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3301 #define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
3302 static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
3304 __sum16 tsum = (__force __sum16) csum;
3307 tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3308 csum_partial(t_header - fix, fix, 0)));
3311 tsum = ~csum_fold(csum_add((__force __wsum) csum,
3312 csum_partial(t_header, -fix, 0)));
3314 return bswab16(tsum);
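/* In short, bnx2x_csum_fix() adjusts a checksum that was computed over a
 * slightly different span than the chip expects: for a positive fix the
 * partial sum of the 'fix' bytes preceding the transport header is
 * subtracted, for a negative fix the missing bytes are added back.
 * csum_partial()/csum_fold() are the standard kernel one's-complement
 * helpers, and bswab16() restores the expected byte order.
 */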
3317 static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
3323 if (skb->ip_summed != CHECKSUM_PARTIAL)
3326 protocol = vlan_get_protocol(skb);
3327 if (protocol == htons(ETH_P_IPV6)) {
3329 prot = ipv6_hdr(skb)->nexthdr;
3332 prot = ip_hdr(skb)->protocol;
3335 if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3336 if (inner_ip_hdr(skb)->version == 6) {
3337 rc |= XMIT_CSUM_ENC_V6;
3338 if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3339 rc |= XMIT_CSUM_TCP;
3341 rc |= XMIT_CSUM_ENC_V4;
3342 if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
3343 rc |= XMIT_CSUM_TCP;
3346 if (prot == IPPROTO_TCP)
3347 rc |= XMIT_CSUM_TCP;
3349 if (skb_is_gso(skb)) {
3350 if (skb_is_gso_v6(skb)) {
3351 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
3352 if (rc & XMIT_CSUM_ENC)
3353 rc |= XMIT_GSO_ENC_V6;
3355 rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
3356 if (rc & XMIT_CSUM_ENC)
3357 rc |= XMIT_GSO_ENC_V4;
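/* Example of the flag composition above: a CHECKSUM_PARTIAL TCP/IPv4 TSO
 * skb without encapsulation would yield something like
 * XMIT_CSUM_V4 | XMIT_CSUM_TCP | XMIT_GSO_V4 (flag names as used elsewhere
 * in this file), which later selects the IPv4 pseudo-checksum and LSO
 * handling in the PBD setup helpers.
 */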
3364 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3365 /* check if the packet requires linearization (i.e. is too fragmented);
3366 no need to check fragmentation if page size > 8K (there will be no
3367 violation of FW restrictions) */
3368 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3373 int first_bd_sz = 0;
3375 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3376 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
3378 if (xmit_type & XMIT_GSO) {
3379 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3380 /* Check if LSO packet needs to be copied:
3381 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
3382 int wnd_size = MAX_FETCH_BD - 3;
3383 /* Number of windows to check */
3384 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3389 /* Headers length */
3390 hlen = (int)(skb_transport_header(skb) - skb->data) +
3393 /* Amount of data (w/o headers) on linear part of SKB */
3394 first_bd_sz = skb_headlen(skb) - hlen;
3396 wnd_sum = first_bd_sz;
3398 /* Calculate the first sum - it's special */
3399 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3401 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
3403 /* If there was data in the linear part of the skb - check it */
3404 if (first_bd_sz > 0) {
3405 if (unlikely(wnd_sum < lso_mss)) {
3410 wnd_sum -= first_bd_sz;
3413 /* Others are easier: run through the frag list and
3414 check all windows */
3415 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3417 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
3419 if (unlikely(wnd_sum < lso_mss)) {
3424 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
3427 /* in the non-LSO case a too fragmented packet should always be linearized */
3434 if (unlikely(to_copy))
3435 DP(NETIF_MSG_TX_QUEUED,
3436 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
3437 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3438 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
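/* A minimal standalone sketch of the window check above (illustrative only,
 * not part of the driver): every window of wnd_size consecutive BDs must
 * carry at least one MSS of data, otherwise the FW cannot assemble a
 * segment and the skb has to be linearized.
 */
static bool __maybe_unused bnx2x_wnd_check_sketch(const unsigned int *bd_sz,
						  int nbds, int wnd_size,
						  unsigned int lso_mss)
{
	unsigned int wnd_sum = 0;
	int i;

	for (i = 0; i < nbds; i++) {
		wnd_sum += bd_sz[i];		/* grow the window */
		if (i >= wnd_size - 1) {
			if (wnd_sum < lso_mss)
				return true;	/* linearization required */
			wnd_sum -= bd_sz[i - (wnd_size - 1)]; /* slide */
		}
	}
	return false;
}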
3444 static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
3447 struct ipv6hdr *ipv6;
3449 *parsing_data |= (skb_shinfo(skb)->gso_size <<
3450 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
3451 ETH_TX_PARSE_BD_E2_LSO_MSS;
3453 if (xmit_type & XMIT_GSO_ENC_V6)
3454 ipv6 = inner_ipv6_hdr(skb);
3455 else if (xmit_type & XMIT_GSO_V6)
3456 ipv6 = ipv6_hdr(skb);
3460 if (ipv6 && ipv6->nexthdr == NEXTHDR_IPV6)
3461 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
3465 * bnx2x_set_pbd_gso - update PBD in GSO case.
3469 * @xmit_type: xmit flags
3471 static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3472 struct eth_tx_parse_bd_e1x *pbd,
3473 struct eth_tx_start_bd *tx_start_bd,
3476 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
3477 pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
3478 pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
3480 if (xmit_type & XMIT_GSO_V4) {
3481 pbd->ip_id = bswab16(ip_hdr(skb)->id);
3482 pbd->tcp_pseudo_csum =
3483 bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3485 0, IPPROTO_TCP, 0));
3487 /* GSO on 57710/57711 needs FW to calculate IP checksum */
3488 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
3490 pbd->tcp_pseudo_csum =
3491 bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3492 &ipv6_hdr(skb)->daddr,
3493 0, IPPROTO_TCP, 0));
3497 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
3501 * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3503 * @bp: driver handle
3505 * @parsing_data: data to be updated
3506 * @xmit_type: xmit flags
3508 * 57712/578xx related, when skb has encapsulation
3510 static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3511 u32 *parsing_data, u32 xmit_type)
3514 ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3515 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3516 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3518 if (xmit_type & XMIT_CSUM_TCP) {
3519 *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3520 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3521 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3523 return skb_inner_transport_header(skb) +
3524 inner_tcp_hdrlen(skb) - skb->data;
3527 /* We support checksum offload for TCP and UDP only.
3528 * No need to pass the UDP header length - it's a constant.
3530 return skb_inner_transport_header(skb) +
3531 sizeof(struct udphdr) - skb->data;
3535 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
3537 * @bp: driver handle
3539 * @parsing_data: data to be updated
3540 * @xmit_type: xmit flags
3542 * 57712/578xx related
3544 static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3545 u32 *parsing_data, u32 xmit_type)
3548 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
3549 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3550 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3552 if (xmit_type & XMIT_CSUM_TCP) {
3553 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3554 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3555 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3557 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
3559 /* We support checksum offload for TCP and UDP only.
3560 * No need to pass the UDP header length - it's a constant.
3562 return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
3565 /* set FW indication according to inner or outer protocols if tunneled */
3566 static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3567 struct eth_tx_start_bd *tx_start_bd,
3570 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3572 if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
3573 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
3575 if (!(xmit_type & XMIT_CSUM_TCP))
3576 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
3580 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
3582 * @bp: driver handle
3584 * @pbd: parse BD to be updated
3585 * @xmit_type: xmit flags
3587 static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3588 struct eth_tx_parse_bd_e1x *pbd,
3591 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
3593 /* for now NS flag is not used in Linux */
3596 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3597 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
3599 pbd->ip_hlen_w = (skb_transport_header(skb) -
3600 skb_network_header(skb)) >> 1;
3602 hlen += pbd->ip_hlen_w;
3604 /* We support checksum offload for TCP and UDP only */
3605 if (xmit_type & XMIT_CSUM_TCP)
3606 hlen += tcp_hdrlen(skb) / 2;
3608 hlen += sizeof(struct udphdr) / 2;
3610 pbd->total_hlen_w = cpu_to_le16(hlen);
3613 if (xmit_type & XMIT_CSUM_TCP) {
3614 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
3617 s8 fix = SKB_CS_OFF(skb); /* signed! */
3619 DP(NETIF_MSG_TX_QUEUED,
3620 "hlen %d fix %d csum before fix %x\n",
3621 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3623 /* HW bug: fixup the CSUM */
3624 pbd->tcp_pseudo_csum =
3625 bnx2x_csum_fix(skb_transport_header(skb),
3628 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3629 pbd->tcp_pseudo_csum);
3635 static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3636 struct eth_tx_parse_bd_e2 *pbd_e2,
3637 struct eth_tx_parse_2nd_bd *pbd2,
3642 u8 outerip_off, outerip_len = 0;
3644 /* from outer IP to transport */
3645 hlen_w = (skb_inner_transport_header(skb) -
3646 skb_network_header(skb)) >> 1;
3649 hlen_w += inner_tcp_hdrlen(skb) >> 1;
3651 pbd2->fw_ip_hdr_to_payload_w = hlen_w;
3653 /* outer IP header info */
3654 if (xmit_type & XMIT_CSUM_V4) {
3655 struct iphdr *iph = ip_hdr(skb);
3656 u32 csum = (__force u32)(~iph->check) -
3657 (__force u32)iph->tot_len -
3658 (__force u32)iph->frag_off;
3660 pbd2->fw_ip_csum_wo_len_flags_frag =
3661 bswab16(csum_fold((__force __wsum)csum));
3663 pbd2->fw_ip_hdr_to_payload_w =
3664 hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
3667 pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3669 pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3671 if (xmit_type & XMIT_GSO_V4) {
3672 pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
3674 pbd_e2->data.tunnel_data.pseudo_csum =
3675 bswab16(~csum_tcpudp_magic(
3676 inner_ip_hdr(skb)->saddr,
3677 inner_ip_hdr(skb)->daddr,
3678 0, IPPROTO_TCP, 0));
3680 outerip_len = ip_hdr(skb)->ihl << 1;
3682 pbd_e2->data.tunnel_data.pseudo_csum =
3683 bswab16(~csum_ipv6_magic(
3684 &inner_ipv6_hdr(skb)->saddr,
3685 &inner_ipv6_hdr(skb)->daddr,
3686 0, IPPROTO_TCP, 0));
3689 outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3693 (!!(xmit_type & XMIT_CSUM_V6) <<
3694 ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT) |
3696 ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3697 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3698 ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
3700 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3701 SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3702 pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3706 /* called with netif_tx_lock
3707 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3708 * netif_wake_queue()
3710 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3712 struct bnx2x *bp = netdev_priv(dev);
3714 struct netdev_queue *txq;
3715 struct bnx2x_fp_txdata *txdata;
3716 struct sw_tx_bd *tx_buf;
3717 struct eth_tx_start_bd *tx_start_bd, *first_bd;
3718 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
3719 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
3720 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
3721 struct eth_tx_parse_2nd_bd *pbd2 = NULL;
3722 u32 pbd_e2_parsing_data = 0;
3723 u16 pkt_prod, bd_prod;
3726 u32 xmit_type = bnx2x_xmit_type(bp, skb);
3729 __le16 pkt_size = 0;
3731 u8 mac_type = UNICAST_ADDRESS;
3733 #ifdef BNX2X_STOP_ON_ERROR
3734 if (unlikely(bp->panic))
3735 return NETDEV_TX_BUSY;
3738 txq_index = skb_get_queue_mapping(skb);
3739 txq = netdev_get_tx_queue(dev, txq_index);
3741 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
3743 txdata = &bp->bnx2x_txq[txq_index];
3745 /* enable this debug print to view the transmission queue being used
3746 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
3747 txq_index, fp_index, txdata_index); */
3749 /* enable this debug print to view the transmission details
3750 DP(NETIF_MSG_TX_QUEUED,
3751 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
3752 txdata->cid, fp_index, txdata_index, txdata, fp); */
3754 if (unlikely(bnx2x_tx_avail(bp, txdata) <
3755 skb_shinfo(skb)->nr_frags +
3757 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
3758 /* Handle special storage cases separately */
3759 if (txdata->tx_ring_size == 0) {
3760 struct bnx2x_eth_q_stats *q_stats =
3761 bnx2x_fp_qstats(bp, txdata->parent_fp);
3762 q_stats->driver_filtered_tx_pkt++;
3764 return NETDEV_TX_OK;
3766 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3767 netif_tx_stop_queue(txq);
3768 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
3770 return NETDEV_TX_BUSY;
3773 DP(NETIF_MSG_TX_QUEUED,
3774 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
3775 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
3776 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3779 eth = (struct ethhdr *)skb->data;
3781 /* set flag according to packet type (UNICAST_ADDRESS is default) */
3782 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3783 if (is_broadcast_ether_addr(eth->h_dest))
3784 mac_type = BROADCAST_ADDRESS;
3786 mac_type = MULTICAST_ADDRESS;
3789 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3790 /* First, check if we need to linearize the skb (due to FW
3791 restrictions). No need to check fragmentation if page size > 8K
3792 (there will be no violation of FW restrictions) */
3793 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3794 /* Statistics of linearization */
3796 if (skb_linearize(skb) != 0) {
3797 DP(NETIF_MSG_TX_QUEUED,
3798 "SKB linearization failed - silently dropping this SKB\n");
3799 dev_kfree_skb_any(skb);
3800 return NETDEV_TX_OK;
3804 /* Map skb linear data for DMA */
3805 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3806 skb_headlen(skb), DMA_TO_DEVICE);
3807 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3808 DP(NETIF_MSG_TX_QUEUED,
3809 "SKB mapping failed - silently dropping this SKB\n");
3810 dev_kfree_skb_any(skb);
3811 return NETDEV_TX_OK;
3814 Please read carefully. First we use one BD which we mark as the start,
3815 then we have a parsing info BD (used for TSO or xsum),
3816 and only then we have the rest of the TSO BDs.
3817 (don't forget to mark the last one as last,
3818 and to unmap only AFTER you write to the BD ...)
3819 And above all, all pbd sizes are in words - NOT DWORDS! */
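/* Resulting BD chain for one packet (the 2nd parsing BD appears only for
 * tunnelled packets on 57712/578xx):
 *
 *	start BD -> parsing BD (e1x or e2) [-> 2nd parsing BD] -> data BDs...
 */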
3822 /* get current pkt produced now - advance it just before sending packet
3823 * since mapping of pages may fail and cause packet to be dropped
3825 pkt_prod = txdata->tx_pkt_prod;
3826 bd_prod = TX_BD(txdata->tx_bd_prod);
3828 /* get a tx_buf and first BD
3829 * tx_start_bd may be changed during SPLIT,
3830 * but first_bd will always stay first
3832 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3833 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
3834 first_bd = tx_start_bd;
3836 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
3838 /* header nbd: indirectly zero other flags! */
3839 tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
3841 /* remember the first BD of the packet */
3842 tx_buf->first_bd = txdata->tx_bd_prod;
3846 DP(NETIF_MSG_TX_QUEUED,
3847 "sending pkt %u @%p next_idx %u bd %u @%p\n",
3848 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
3850 if (vlan_tx_tag_present(skb)) {
3851 tx_start_bd->vlan_or_ethertype =
3852 cpu_to_le16(vlan_tx_tag_get(skb));
3853 tx_start_bd->bd_flags.as_bitfield |=
3854 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3856 /* when transmitting in a VF, the start BD must hold the ethertype
3857 * for the FW to enforce it
3860 tx_start_bd->vlan_or_ethertype =
3861 cpu_to_le16(ntohs(eth->h_proto));
3863 /* used by FW for packet accounting */
3864 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3867 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3869 /* turn on parsing and get a BD */
3870 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3872 if (xmit_type & XMIT_CSUM)
3873 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
3875 if (!CHIP_IS_E1x(bp)) {
3876 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
3877 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3879 if (xmit_type & XMIT_CSUM_ENC) {
3880 u16 global_data = 0;
3882 /* Set PBD in enc checksum offload case */
3883 hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3884 &pbd_e2_parsing_data,
3887 /* turn on 2nd parsing and get a BD */
3888 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3890 pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3892 memset(pbd2, 0, sizeof(*pbd2));
3894 pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3895 (skb_inner_network_header(skb) -
3898 if (xmit_type & XMIT_GSO_ENC)
3899 bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3903 pbd2->global_data = cpu_to_le16(global_data);
3905 /* add additional parsing BD indication to start BD */
3906 SET_FLAG(tx_start_bd->general_data,
3907 ETH_TX_START_BD_PARSE_NBDS, 1);
3908 /* set encapsulation flag in start BD */
3909 SET_FLAG(tx_start_bd->general_data,
3910 ETH_TX_START_BD_TUNNEL_EXIST, 1);
3912 tx_buf->flags |= BNX2X_HAS_SECOND_PBD;
3915 } else if (xmit_type & XMIT_CSUM) {
3916 /* Set PBD in checksum offload case w/o encapsulation */
3917 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3918 &pbd_e2_parsing_data,
3922 /* Add the MACs to the parsing BD if this is a VF or if
3923 * Tx switching is enabled.
3926 /* override GRE parameters in BD */
3927 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3928 &pbd_e2->data.mac_addr.src_mid,
3929 &pbd_e2->data.mac_addr.src_lo,
3932 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3933 &pbd_e2->data.mac_addr.dst_mid,
3934 &pbd_e2->data.mac_addr.dst_lo,
3936 } else if (bp->flags & TX_SWITCHING) {
3937 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3938 &pbd_e2->data.mac_addr.dst_mid,
3939 &pbd_e2->data.mac_addr.dst_lo,
3943 SET_FLAG(pbd_e2_parsing_data,
3944 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
3946 u16 global_data = 0;
3947 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
3948 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3949 /* Set PBD in checksum offload case */
3950 if (xmit_type & XMIT_CSUM)
3951 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
3953 SET_FLAG(global_data,
3954 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
3955 pbd_e1x->global_data |= cpu_to_le16(global_data);
3958 /* Setup the data pointer of the first BD of the packet */
3959 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3960 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3961 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3962 pkt_size = tx_start_bd->nbytes;
3964 DP(NETIF_MSG_TX_QUEUED,
3965 "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
3966 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
3967 le16_to_cpu(tx_start_bd->nbytes),
3968 tx_start_bd->bd_flags.as_bitfield,
3969 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
3971 if (xmit_type & XMIT_GSO) {
3973 DP(NETIF_MSG_TX_QUEUED,
3974 "TSO packet len %d hlen %d total len %d tso size %d\n",
3975 skb->len, hlen, skb_headlen(skb),
3976 skb_shinfo(skb)->gso_size);
3978 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3980 if (unlikely(skb_headlen(skb) > hlen)) {
3982 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3986 if (!CHIP_IS_E1x(bp))
3987 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3990 bnx2x_set_pbd_gso(skb, pbd_e1x, first_bd, xmit_type);
3993 /* Set the PBD's parsing_data field if not zero
3994 * (for the chips newer than 57711).
3996 if (pbd_e2_parsing_data)
3997 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3999 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
4001 /* Handle fragmented skb */
4002 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4003 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4005 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
4006 skb_frag_size(frag), DMA_TO_DEVICE);
4007 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
4008 unsigned int pkts_compl = 0, bytes_compl = 0;
4010 DP(NETIF_MSG_TX_QUEUED,
4011 "Unable to map page - dropping packet...\n");
4013 /* we need to unmap all buffers already mapped for this SKB;
4015 * first_bd->nbd needs to be properly updated
4016 * before the call to bnx2x_free_tx_pkt
4018 first_bd->nbd = cpu_to_le16(nbd);
4019 bnx2x_free_tx_pkt(bp, txdata,
4020 TX_BD(txdata->tx_pkt_prod),
4021 &pkts_compl, &bytes_compl);
4022 return NETDEV_TX_OK;
4025 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4026 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4027 if (total_pkt_bd == NULL)
4028 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4030 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4031 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
4032 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
4033 le16_add_cpu(&pkt_size, skb_frag_size(frag));
4036 DP(NETIF_MSG_TX_QUEUED,
4037 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
4038 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
4039 le16_to_cpu(tx_data_bd->nbytes));
4042 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
4044 /* update with actual num BDs */
4045 first_bd->nbd = cpu_to_le16(nbd);
4047 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4049 /* now send a tx doorbell, counting the next BD
4050 * if the packet contains or ends with it
4052 if (TX_BD_POFF(bd_prod) < nbd)
4055 /* total_pkt_bytes should be set on the first data BD if
4056 * it's not an LSO packet and there is more than one
4057 * data BD. In this case pkt_size is limited by an MTU value.
4058 * However we prefer to set it for an LSO packet (while we don't
4059 * have to) in order to save some CPU cycles in the non-LSO
4060 * case, where we care much more about them.
4062 if (total_pkt_bd != NULL)
4063 total_pkt_bd->total_pkt_bytes = pkt_size;
4066 DP(NETIF_MSG_TX_QUEUED,
4067 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
4068 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
4069 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
4070 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
4071 le16_to_cpu(pbd_e1x->total_hlen_w));
4073 DP(NETIF_MSG_TX_QUEUED,
4074 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
4076 pbd_e2->data.mac_addr.dst_hi,
4077 pbd_e2->data.mac_addr.dst_mid,
4078 pbd_e2->data.mac_addr.dst_lo,
4079 pbd_e2->data.mac_addr.src_hi,
4080 pbd_e2->data.mac_addr.src_mid,
4081 pbd_e2->data.mac_addr.src_lo,
4082 pbd_e2->parsing_data);
4083 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
4085 netdev_tx_sent_queue(txq, skb->len);
4087 skb_tx_timestamp(skb);
4089 txdata->tx_pkt_prod++;
4091 * Make sure that the BD data is updated before updating the producer
4092 * since FW might read the BD right after the producer is updated.
4093 * This is only applicable for weak-ordered memory model archs such
4094 * as IA-64. The following barrier is also mandatory since the FW
4095 * assumes packets always have BDs.
4099 txdata->tx_db.data.prod += nbd;
4102 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
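/* Ordering sketch for the doorbell above: (1) the BDs are written, (2) a
 * write barrier must keep those writes ahead of the producer update, (3)
 * tx_db.data.prod is advanced by nbd, (4) the doorbell register is written.
 * Steps (2)-(3) are what the barrier comment above refers to.
 */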
4106 txdata->tx_bd_prod += nbd;
4108 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
4109 netif_tx_stop_queue(txq);
4111 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
4112 * ordering of set_bit() in netif_tx_stop_queue() and the read of
4113 * tx_bd_cons in bnx2x_tx_avail(). */
4116 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
4117 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
4118 netif_tx_wake_queue(txq);
4122 return NETDEV_TX_OK;
4126 * bnx2x_setup_tc - routine to configure net_device for multi tc
4128 * @dev: net device to configure
4129 * @num_tc: number of traffic classes to enable
4131 * callback connected to the ndo_setup_tc function pointer
4133 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
4135 int cos, prio, count, offset;
4136 struct bnx2x *bp = netdev_priv(dev);
4138 /* setup tc must be called under rtnl lock */
4141 /* no traffic classes requested. Aborting */
4143 netdev_reset_tc(dev);
4147 /* requested to support too many traffic classes */
4148 if (num_tc > bp->max_cos) {
4149 BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
4150 num_tc, bp->max_cos);
4154 /* declare amount of supported traffic classes */
4155 if (netdev_set_num_tc(dev, num_tc)) {
4156 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
4160 /* configure priority to traffic class mapping */
4161 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
4162 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
4163 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4164 "mapping priority %d to tc %d\n",
4165 prio, bp->prio_to_cos[prio]);
4168 /* Use this configuration to differentiate tc0 from other COSes.
4169 This can be used for ETS or PFC, and saves the effort of setting
4170 up a multi-class queue disc or negotiating DCBX with a switch:
4171 netdev_set_prio_tc_map(dev, 0, 0);
4172 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
4173 for (prio = 1; prio < 16; prio++) {
4174 netdev_set_prio_tc_map(dev, prio, 1);
4175 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
4178 /* configure traffic class to transmission queue mapping */
4179 for (cos = 0; cos < bp->max_cos; cos++) {
4180 count = BNX2X_NUM_ETH_QUEUES(bp);
4181 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
4182 netdev_set_tc_queue(dev, cos, count, offset);
4183 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4184 "mapping tc %d to offset %d count %d\n",
4185 cos, offset, count);
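/* Example of the resulting mapping (hypothetical 4 ETH queues, max_cos = 3):
 * tc0 -> txq 0..3, tc1 -> txq 4..7, tc2 -> txq 8..11. For each class,
 * offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp) and count is the number of
 * ETH queues, so the skb priority selects a class and RSS selects a queue
 * within it.
 */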
4191 /* called with rtnl_lock */
4192 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4194 struct sockaddr *addr = p;
4195 struct bnx2x *bp = netdev_priv(dev);
4198 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
4199 BNX2X_ERR("Requested MAC address is not valid\n");
4203 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
4204 !is_zero_ether_addr(addr->sa_data)) {
4205 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
4209 if (netif_running(dev)) {
4210 rc = bnx2x_set_eth_mac(bp, false);
4215 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4217 if (netif_running(dev))
4218 rc = bnx2x_set_eth_mac(bp, true);
4223 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4225 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4226 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
4231 if (IS_FCOE_IDX(fp_index)) {
4232 memset(sb, 0, sizeof(union host_hc_status_block));
4233 fp->status_blk_mapping = 0;
4236 if (!CHIP_IS_E1x(bp))
4237 BNX2X_PCI_FREE(sb->e2_sb,
4238 bnx2x_fp(bp, fp_index,
4239 status_blk_mapping),
4240 sizeof(struct host_hc_status_block_e2));
4242 BNX2X_PCI_FREE(sb->e1x_sb,
4243 bnx2x_fp(bp, fp_index,
4244 status_blk_mapping),
4245 sizeof(struct host_hc_status_block_e1x));
4249 if (!skip_rx_queue(bp, fp_index)) {
4250 bnx2x_free_rx_bds(fp);
4252 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4253 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4254 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4255 bnx2x_fp(bp, fp_index, rx_desc_mapping),
4256 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4258 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4259 bnx2x_fp(bp, fp_index, rx_comp_mapping),
4260 sizeof(struct eth_fast_path_rx_cqe) *
4264 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4265 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4266 bnx2x_fp(bp, fp_index, rx_sge_mapping),
4267 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4271 if (!skip_tx_queue(bp, fp_index)) {
4272 /* fastpath tx rings: tx_buf tx_desc */
4273 for_each_cos_in_tx_queue(fp, cos) {
4274 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4276 DP(NETIF_MSG_IFDOWN,
4277 "freeing tx memory of fp %d cos %d cid %d\n",
4278 fp_index, cos, txdata->cid);
4280 BNX2X_FREE(txdata->tx_buf_ring);
4281 BNX2X_PCI_FREE(txdata->tx_desc_ring,
4282 txdata->tx_desc_mapping,
4283 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4286 /* end of fastpath */
4289 static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
4292 for_each_cnic_queue(bp, i)
4293 bnx2x_free_fp_mem_at(bp, i);
4296 void bnx2x_free_fp_mem(struct bnx2x *bp)
4299 for_each_eth_queue(bp, i)
4300 bnx2x_free_fp_mem_at(bp, i);
4303 static void set_sb_shortcuts(struct bnx2x *bp, int index)
4305 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
4306 if (!CHIP_IS_E1x(bp)) {
4307 bnx2x_fp(bp, index, sb_index_values) =
4308 (__le16 *)status_blk.e2_sb->sb.index_values;
4309 bnx2x_fp(bp, index, sb_running_index) =
4310 (__le16 *)status_blk.e2_sb->sb.running_index;
4312 bnx2x_fp(bp, index, sb_index_values) =
4313 (__le16 *)status_blk.e1x_sb->sb.index_values;
4314 bnx2x_fp(bp, index, sb_running_index) =
4315 (__le16 *)status_blk.e1x_sb->sb.running_index;
4319 /* Returns the number of actually allocated BDs */
4320 static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4323 struct bnx2x *bp = fp->bp;
4324 u16 ring_prod, cqe_ring_prod;
4325 int i, failure_cnt = 0;
4327 fp->rx_comp_cons = 0;
4328 cqe_ring_prod = ring_prod = 0;
4330 /* This routine is called only during fp init, so
4331 * fp->eth_q_stats.rx_skb_alloc_failed = 0
4333 for (i = 0; i < rx_ring_size; i++) {
4334 if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
4338 ring_prod = NEXT_RX_IDX(ring_prod);
4339 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4340 WARN_ON(ring_prod <= (i - failure_cnt));
4344 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4345 i - failure_cnt, fp->index);
4347 fp->rx_bd_prod = ring_prod;
4348 /* Limit the CQE producer by the CQE ring size */
4349 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4351 fp->rx_pkt = fp->rx_calls = 0;
4353 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
4355 return i - failure_cnt;
4358 static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4362 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4363 struct eth_rx_cqe_next_page *nextpg;
4365 nextpg = (struct eth_rx_cqe_next_page *)
4366 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4368 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4369 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4371 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4372 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4376 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4378 union host_hc_status_block *sb;
4379 struct bnx2x_fastpath *fp = &bp->fp[index];
4382 int rx_ring_size = 0;
4384 if (!bp->rx_ring_size &&
4385 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
4386 rx_ring_size = MIN_RX_SIZE_NONTPA;
4387 bp->rx_ring_size = rx_ring_size;
4388 } else if (!bp->rx_ring_size) {
4389 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4391 if (CHIP_IS_E3(bp)) {
4392 u32 cfg = SHMEM_RD(bp,
4393 dev_info.port_hw_config[BP_PORT(bp)].
4396 /* Decrease ring size for 1G functions */
4397 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4398 PORT_HW_CFG_NET_SERDES_IF_SGMII)
4402 /* allocate at least the number of buffers required by the FW */
4403 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4404 MIN_RX_SIZE_TPA, rx_ring_size);
4406 bp->rx_ring_size = rx_ring_size;
4407 } else /* if rx_ring_size specified - use it */
4408 rx_ring_size = bp->rx_ring_size;
4410 DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
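/* Sizing sketch with hypothetical numbers: if MAX_RX_AVAIL split across 8
 * RSS queues falls below the FW minimum, the max_t() above lifts the result
 * back to MIN_RX_SIZE_TPA (or MIN_RX_SIZE_NONTPA when TPA is disabled), so
 * every queue always satisfies the FW's buffer floor.
 */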
4413 sb = &bnx2x_fp(bp, index, status_blk);
4415 if (!IS_FCOE_IDX(index)) {
4417 if (!CHIP_IS_E1x(bp)) {
4418 sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4419 sizeof(struct host_hc_status_block_e2));
4423 sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4424 sizeof(struct host_hc_status_block_e1x));
4430 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4431 * set shortcuts for it.
4433 if (!IS_FCOE_IDX(index))
4434 set_sb_shortcuts(bp, index);
4437 if (!skip_tx_queue(bp, index)) {
4438 /* fastpath tx rings: tx_buf tx_desc */
4439 for_each_cos_in_tx_queue(fp, cos) {
4440 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4443 "allocating tx memory of fp %d cos %d\n",
4446 txdata->tx_buf_ring = kcalloc(NUM_TX_BD,
4447 sizeof(struct sw_tx_bd),
4449 if (!txdata->tx_buf_ring)
4451 txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
4452 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4453 if (!txdata->tx_desc_ring)
4459 if (!skip_rx_queue(bp, index)) {
4460 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4461 bnx2x_fp(bp, index, rx_buf_ring) =
4462 kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL);
4463 if (!bnx2x_fp(bp, index, rx_buf_ring))
4465 bnx2x_fp(bp, index, rx_desc_ring) =
4466 BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
4467 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4468 if (!bnx2x_fp(bp, index, rx_desc_ring))
4471 /* Seed all CQEs by 1s */
4472 bnx2x_fp(bp, index, rx_comp_ring) =
4473 BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
4474 sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);
4475 if (!bnx2x_fp(bp, index, rx_comp_ring))
4479 bnx2x_fp(bp, index, rx_page_ring) =
4480 kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page),
4482 if (!bnx2x_fp(bp, index, rx_page_ring))
4484 bnx2x_fp(bp, index, rx_sge_ring) =
4485 BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
4486 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4487 if (!bnx2x_fp(bp, index, rx_sge_ring))
4490 bnx2x_set_next_page_rx_bd(fp);
4493 bnx2x_set_next_page_rx_cq(fp);
4496 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4497 if (ring_size < rx_ring_size)
4503 /* handles low memory cases */
4505 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4507 /* FW will drop all packets if queue is not big enough,
4508 * In these cases we disable the queue
4509 * Min size is different for OOO, TPA and non-TPA queues
4511 if (ring_size < (fp->disable_tpa ?
4512 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
4513 /* release memory allocated for this queue */
4514 bnx2x_free_fp_mem_at(bp, index);
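
/* Allocate fastpath memory for the CNIC-related FCoE L2 queue, if any. */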
static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
{
	/* FCoE */
	if (!NO_FCOE(bp))
		if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
			/* we will fail the load process instead of marking
			 * NO_FCOE_FLAG
			 */
			return -ENOMEM;

	return 0;
}

static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
{
	int i;

	/* 1. Allocate FP for leading - fatal if error
	 * 2. Allocate RSS - fix number of queues if error
	 */

	/* leading */
	if (bnx2x_alloc_fp_mem_at(bp, 0))
		return -ENOMEM;

	/* RSS */
	for_each_nondefault_eth_queue(bp, i)
		if (bnx2x_alloc_fp_mem_at(bp, i))
			break;

	/* handle memory failures */
	if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
		int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;

		WARN_ON(delta < 0);
		bnx2x_shrink_eth_fp(bp, delta);
		if (CNIC_SUPPORT(bp))
			/* move non-eth FPs next to the last eth FP;
			 * must be done in that order:
			 * FCOE_IDX < FWD_IDX < OOO_IDX
			 */

			/* move the FCoE fp even if NO_FCOE_FLAG is on */
			bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
		bp->num_ethernet_queues -= delta;
		bp->num_queues = bp->num_ethernet_queues +
				 bp->num_cnic_queues;
		BNX2X_ERR("Adjusted num of queues from %d to %d\n",
			  bp->num_queues + delta, bp->num_queues);
	}

	return 0;
}
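
/* Free the per-adapter bookkeeping arrays allocated by bnx2x_alloc_mem_bp().
 * Safe on partial allocations: kfree(NULL) is a no-op.
 */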
void bnx2x_free_mem_bp(struct bnx2x *bp)
{
	int i;

	for (i = 0; i < bp->fp_array_size; i++)
		kfree(bp->fp[i].tpa_info);
	kfree(bp->fp);
	kfree(bp->sp_objs);
	kfree(bp->fp_stats);
	kfree(bp->bnx2x_txq);
	kfree(bp->msix_table);
	kfree(bp->ilt);
}
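
/* Allocate the per-adapter bookkeeping arrays: the fastpath array (with
 * per-fp TPA aggregation info), slowpath objects, per-fp stats, the Tx
 * queue array, the MSI-X table and the ILT. Any failure unwinds through
 * bnx2x_free_mem_bp().
 */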
int bnx2x_alloc_mem_bp(struct bnx2x *bp)
{
	struct bnx2x_fastpath *fp;
	struct msix_entry *tbl;
	struct bnx2x_ilt *ilt;
	int msix_table_size = 0;
	int fp_array_size, txq_array_size;
	int i;

	/*
	 * The biggest MSI-X table we might need is as a maximum number of fast
	 * path IGU SBs plus default SB (for PF only).
	 */
	msix_table_size = bp->igu_sb_cnt;
	if (IS_PF(bp))
		msix_table_size++;
	BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);

	/* fp array: RSS plus CNIC related L2 queues */
	fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
	bp->fp_array_size = fp_array_size;
	BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);

	fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
	if (!fp)
		goto alloc_err;
	for (i = 0; i < bp->fp_array_size; i++) {
		fp[i].tpa_info =
			kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
				sizeof(struct bnx2x_agg_info), GFP_KERNEL);
		if (!(fp[i].tpa_info))
			goto alloc_err;
	}
	bp->fp = fp;

	/* allocate sp objs */
	bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
			      GFP_KERNEL);
	if (!bp->sp_objs)
		goto alloc_err;

	/* allocate fp_stats */
	bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
			       GFP_KERNEL);
	if (!bp->fp_stats)
		goto alloc_err;

	/* Allocate memory for the transmission queues array */
	txq_array_size =
		BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
	BNX2X_DEV_INFO("txq_array_size %d\n", txq_array_size);
	bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
				GFP_KERNEL);
	if (!bp->bnx2x_txq)
		goto alloc_err;

	/* msix table */
	tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
	if (!tbl)
		goto alloc_err;
	bp->msix_table = tbl;

	/* ilt */
	ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
	if (!ilt)
		goto alloc_err;
	bp->ilt = ilt;

	return 0;
alloc_err:
	bnx2x_free_mem_bp(bp);
	return -ENOMEM;
}
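
/* called with rtnl_lock */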
int bnx2x_reload_if_running(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (unlikely(!netif_running(dev)))
		return 0;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
	return bnx2x_nic_load(bp, LOAD_NORMAL);
}
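
/* Return the index of the currently active PHY: INT_PHY when only one PHY
 * exists; with link up, the external PHY matching the reported link type;
 * with link down, the PHY dictated by the configured selection priority.
 */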
int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = 0;

	if (bp->link_params.num_phys <= 1)
		return INT_PHY;

	if (bp->link_vars.link_up) {
		sel_phy_idx = EXT_PHY1;
		/* In case link is SERDES, check if the EXT_PHY2 is the one */
		if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
		    (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
			sel_phy_idx = EXT_PHY2;
	} else {
		switch (bnx2x_phy_selection(&bp->link_params)) {
		case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY1;
			break;
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY2;
			break;
		}
	}

	return sel_phy_idx;
}

int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);

	/*
	 * The selected activated PHY is always after swapping (in case PHY
	 * swapping is enabled). So when swapping is enabled, we need to reverse
	 * the configuration.
	 */
	if (bp->link_params.multi_phy_config &
	    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
		if (sel_phy_idx == EXT_PHY1)
			sel_phy_idx = EXT_PHY2;
		else if (sel_phy_idx == EXT_PHY2)
			sel_phy_idx = EXT_PHY1;
	}
	return LINK_CONFIG_IDX(sel_phy_idx);
}

#ifdef NETDEV_FCOE_WWNN
int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	switch (type) {
	case NETDEV_FCOE_WWNN:
		*wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
				cp->fcoe_wwn_node_name_lo);
		break;
	case NETDEV_FCOE_WWPN:
		*wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
				cp->fcoe_wwn_port_name_lo);
		break;
	default:
		BNX2X_ERR("Wrong WWN type requested - %d\n", type);
		return -EINVAL;
	}

	return 0;
}
#endif

/* called with rtnl_lock */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		BNX2X_ERR("Can't perform change MTU during parity recovery\n");
		return -EAGAIN;
	}

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
		BNX2X_ERR("Can't support requested MTU size\n");
		return -EINVAL;
	}

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	return bnx2x_reload_if_running(dev);
}
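
/* Drop features whose prerequisites are not met: TPA-based LRO/GRO
 * requires Rx checksum offload, so both are cleared when RXCSUM is off or
 * TPA is administratively disabled.
 */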
netdev_features_t bnx2x_fix_features(struct net_device *dev,
				     netdev_features_t features)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* TPA requires Rx CSUM offloading */
	if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
		features &= ~NETIF_F_LRO;
		features &= ~NETIF_F_GRO;
	}

	return features;
}
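
/* Apply feature changes. TPA (LRO/GRO) and loopback transitions need a
 * full unload/load cycle; if parity recovery is in progress the reload is
 * deferred to the end of recovery.
 */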
int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 flags = bp->flags;
	u32 changes;
	bool bnx2x_reload = false;

	if (features & NETIF_F_LRO)
		flags |= TPA_ENABLE_FLAG;
	else
		flags &= ~TPA_ENABLE_FLAG;

	if (features & NETIF_F_GRO)
		flags |= GRO_ENABLE_FLAG;
	else
		flags &= ~GRO_ENABLE_FLAG;

	if (features & NETIF_F_LOOPBACK) {
		if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
			bp->link_params.loopback_mode = LOOPBACK_BMAC;
			bnx2x_reload = true;
		}
	} else {
		if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
			bp->link_params.loopback_mode = LOOPBACK_NONE;
			bnx2x_reload = true;
		}
	}

	changes = flags ^ bp->flags;

	/* if GRO is changed while LRO is enabled, don't force a reload */
	if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG))
		changes &= ~GRO_ENABLE_FLAG;

	if (changes)
		bnx2x_reload = true;

	bp->flags = flags;

	if (bnx2x_reload) {
		if (bp->recovery_state == BNX2X_RECOVERY_DONE)
			return bnx2x_reload_if_running(dev);
		/* else: bnx2x_nic_load() will be called at end of recovery */
	}

	return 0;
}

void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif

	/* This allows the netif to be shutdown gracefully before resetting */
	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
}
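
/* Legacy PCI power-management hooks: suspend detaches the netdev and
 * unloads the NIC before entering the target power state; resume restores
 * PCI state and reloads the NIC. Both take rtnl_lock around the transition.
 */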
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		BNX2X_ERR("Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}
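
/* Stamp the ustorm/xstorm sections of an Ethernet connection context with
 * CDU validation values derived from the HW CID.
 */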
void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
			      u32 cid)
{
	if (!cxt) {
		BNX2X_ERR("bad context pointer %p\n", cxt);
		return;
	}

	/* ustorm cxt validation */
	cxt->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
				       CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
	/* xcontext validation */
	cxt->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
				       CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
}

static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
				    u8 fw_sb_id, u8 sb_index,
				    u8 ticks)
{
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);

	REG_WR8(bp, addr, ticks);
	DP(NETIF_MSG_IFUP,
	   "port %x fw_sb_id %d sb_index %d ticks %d\n",
	   port, fw_sb_id, sb_index, ticks);
}

static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
				    u16 fw_sb_id, u8 sb_index,
				    u8 disable)
{
	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
	u8 flags = REG_RD8(bp, addr);

	/* clear and set */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= enable_flag;
	REG_WR8(bp, addr, flags);
	DP(NETIF_MSG_IFUP,
	   "port %x fw_sb_id %d sb_index %d disable %d\n",
	   port, fw_sb_id, sb_index, disable);
}
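
/* Convert the requested coalescing interval to BTR ticks and program it
 * for one status-block index; an interval of 0 (or an explicit disable
 * request) turns coalescing off for that index.
 */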
void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
				    u8 sb_index, u8 disable, u16 usec)
{
	int port = BP_PORT(bp);
	u8 ticks = usec / BNX2X_BTR;

	storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);

	disable = disable ? 1 : (usec ? 0 : 1);
	storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
}
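
/* Set a slowpath-task flag and kick the sp_rtnl worker; the barriers
 * around set_bit() order the flag update against scheduling the work.
 */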
void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
			    u32 verbose)
{
	smp_mb__before_atomic();
	set_bit(flag, &bp->sp_rtnl_state);
	smp_mb__after_atomic();
	DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
	   flag);
	schedule_delayed_work(&bp->sp_rtnl_task, 0);
}
EXPORT_SYMBOL(bnx2x_schedule_sp_rtnl);