1 /* bnx2x_cmn.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2007-2013 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
10  * Written by: Eliezer Tamir
11  * Based on code from Michael Chan's bnx2 driver
12  * UDP CSUM errata workaround by Arik Gendelman
13  * Slowpath and fastpath rework by Vladislav Zolotarov
14  * Statistics and Link management by Yitchak Gertner
15  *
16  */
17
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
20 #include <linux/etherdevice.h>
21 #include <linux/if_vlan.h>
22 #include <linux/interrupt.h>
23 #include <linux/ip.h>
24 #include <net/tcp.h>
25 #include <net/ipv6.h>
26 #include <net/ip6_checksum.h>
27 #include <net/busy_poll.h>
28 #include <linux/prefetch.h>
29 #include "bnx2x_cmn.h"
30 #include "bnx2x_init.h"
31 #include "bnx2x_sp.h"
32
33 static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
34 static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
35 static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
36 static int bnx2x_poll(struct napi_struct *napi, int budget);
37
38 static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
39 {
40         int i;
41
42         /* Add NAPI objects */
43         for_each_rx_queue_cnic(bp, i) {
44                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
45                                bnx2x_poll, NAPI_POLL_WEIGHT);
46                 napi_hash_add(&bnx2x_fp(bp, i, napi));
47         }
48 }
49
50 static void bnx2x_add_all_napi(struct bnx2x *bp)
51 {
52         int i;
53
54         /* Add NAPI objects */
55         for_each_eth_queue(bp, i) {
56                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
57                                bnx2x_poll, NAPI_POLL_WEIGHT);
58                 napi_hash_add(&bnx2x_fp(bp, i, napi));
59         }
60 }
61
62 static int bnx2x_calc_num_queues(struct bnx2x *bp)
63 {
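        /* Note: bnx2x_num_queues is a driver-wide override (0 means auto);
         * the GCC "x ? : y" extension below evaluates to x when x is
         * non-zero and to y otherwise, so 0 falls back to the default RSS
         * queue count.
         */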
64         int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();
65
66         /* Reduce memory usage in kdump environment by using only one queue */
67         if (reset_devices)
68                 nq = 1;
69
70         nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
71         return nq;
72 }
73
74 /**
75  * bnx2x_move_fp - move content of the fastpath structure.
76  *
77  * @bp:         driver handle
78  * @from:       source FP index
79  * @to:         destination FP index
80  *
81  * Makes sure the contents of the bp->fp[to].napi are kept
82  * intact. This is done by first copying the napi struct from
83  * the target to the source, and then memcpying the entire
84  * source onto the target. Txdata pointers and related content
85  * are updated accordingly.
86  */
87 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
88 {
89         struct bnx2x_fastpath *from_fp = &bp->fp[from];
90         struct bnx2x_fastpath *to_fp = &bp->fp[to];
91         struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
92         struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
93         struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
94         struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
95         int old_max_eth_txqs, new_max_eth_txqs;
96         int old_txdata_index = 0, new_txdata_index = 0;
97         struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;
98
99         /* Copy the NAPI object as it has been already initialized */
100         from_fp->napi = to_fp->napi;
101
102         /* Move bnx2x_fastpath contents */
103         memcpy(to_fp, from_fp, sizeof(*to_fp));
104         to_fp->index = to;
105
106         /* Retain the tpa_info of the original `to' version as we don't want
107          * 2 FPs to contain the same tpa_info pointer.
108          */
109         to_fp->tpa_info = old_tpa_info;
110
111         /* move sp_objs contents as well, as their indices match fp ones */
112         memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
113
114         /* move fp_stats contents as well, as their indices match fp ones */
115         memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
116
117         /* Update txdata pointers in fp and move txdata content accordingly:
118          * Each fp consumes 'max_cos' txdata structures, so the index should be
119          * decremented by max_cos x delta.
120          */
121
122         old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
123         new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
124                                 (bp)->max_cos;
125         if (from == FCOE_IDX(bp)) {
126                 old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
127                 new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
128         }
129
130         memcpy(&bp->bnx2x_txq[new_txdata_index],
131                &bp->bnx2x_txq[old_txdata_index],
132                sizeof(struct bnx2x_fp_txdata));
133         to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
134 }
135
136 /**
137  * bnx2x_fill_fw_str - Fill buffer with FW version string.
138  *
139  * @bp:        driver handle
140  * @buf:       character buffer to fill with the fw name
141  * @buf_len:   length of the above buffer
142  *
143  */
144 void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
145 {
146         if (IS_PF(bp)) {
147                 u8 phy_fw_ver[PHY_FW_VER_LEN];
148
149                 phy_fw_ver[0] = '\0';
150                 bnx2x_get_ext_phy_fw_version(&bp->link_params,
151                                              phy_fw_ver, PHY_FW_VER_LEN);
152                 strlcpy(buf, bp->fw_ver, buf_len);
153                 snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
154                          "bc %d.%d.%d%s%s",
155                          (bp->common.bc_ver & 0xff0000) >> 16,
156                          (bp->common.bc_ver & 0xff00) >> 8,
157                          (bp->common.bc_ver & 0xff),
158                          ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
159         } else {
160                 bnx2x_vf_fill_fw_str(bp, buf, buf_len);
161         }
162 }
163
164 /**
165  * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
166  *
167  * @bp: driver handle
168  * @delta:      number of eth queues which were not allocated
169  */
170 static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
171 {
172         int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
173
174         /* Queue pointers cannot be re-set on a per-fp basis, as moving a pointer
175          * backward along the array could cause memory to be overwritten
176          */
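        /* Illustration: with old_eth_num = 8 and delta = 2, the cos-1 txdata
         * of eth queue i is repacked at index 1 * (8 - 2) + i, i.e. right
         * after the remaining cos-0 entries.
         */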
177         for (cos = 1; cos < bp->max_cos; cos++) {
178                 for (i = 0; i < old_eth_num - delta; i++) {
179                         struct bnx2x_fastpath *fp = &bp->fp[i];
180                         int new_idx = cos * (old_eth_num - delta) + i;
181
182                         memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
183                                sizeof(struct bnx2x_fp_txdata));
184                         fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
185                 }
186         }
187 }
188
189 int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
190
191 /* free skb in the packet ring at pos idx
192  * return idx of last bd freed
193  */
194 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
195                              u16 idx, unsigned int *pkts_compl,
196                              unsigned int *bytes_compl)
197 {
198         struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
199         struct eth_tx_start_bd *tx_start_bd;
200         struct eth_tx_bd *tx_data_bd;
201         struct sk_buff *skb = tx_buf->skb;
202         u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
203         int nbd;
204         u16 split_bd_len = 0;
205
206         /* prefetch skb end pointer to speed up dev_kfree_skb() */
207         prefetch(&skb->end);
208
209         DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d  buff @(%p)->skb %p\n",
210            txdata->txq_index, idx, tx_buf, skb);
211
212         tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
213
214         nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
215 #ifdef BNX2X_STOP_ON_ERROR
216         if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
217                 BNX2X_ERR("BAD nbd!\n");
218                 bnx2x_panic();
219         }
220 #endif
221         new_cons = nbd + tx_buf->first_bd;
222
223         /* Get the next bd */
224         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
225
226         /* Skip a parse bd... */
227         --nbd;
228         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
229
230         /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
231         if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
232                 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
233                 split_bd_len = BD_UNMAP_LEN(tx_data_bd);
234                 --nbd;
235                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
236         }
237
238         /* unmap first bd */
239         dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
240                          BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
241                          DMA_TO_DEVICE);
242
243         /* now free frags */
244         while (nbd > 0) {
245
246                 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
247                 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
248                                BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
249                 if (--nbd)
250                         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
251         }
252
253         /* release skb */
254         WARN_ON(!skb);
255         if (likely(skb)) {
256                 (*pkts_compl)++;
257                 (*bytes_compl) += skb->len;
258         }
259
260         dev_kfree_skb_any(skb);
261         tx_buf->first_bd = 0;
262         tx_buf->skb = NULL;
263
264         return new_cons;
265 }
266
267 int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
268 {
269         struct netdev_queue *txq;
270         u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
271         unsigned int pkts_compl = 0, bytes_compl = 0;
272
273 #ifdef BNX2X_STOP_ON_ERROR
274         if (unlikely(bp->panic))
275                 return -1;
276 #endif
277
278         txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
279         hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
280         sw_cons = txdata->tx_pkt_cons;
281
282         while (sw_cons != hw_cons) {
283                 u16 pkt_cons;
284
285                 pkt_cons = TX_BD(sw_cons);
286
287                 DP(NETIF_MSG_TX_DONE,
288                    "queue[%d]: hw_cons %u  sw_cons %u  pkt_cons %u\n",
289                    txdata->txq_index, hw_cons, sw_cons, pkt_cons);
290
291                 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
292                                             &pkts_compl, &bytes_compl);
293
294                 sw_cons++;
295         }
296
297         netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
298
299         txdata->tx_pkt_cons = sw_cons;
300         txdata->tx_bd_cons = bd_cons;
301
302         /* Need to make the tx_bd_cons update visible to start_xmit()
303          * before checking for netif_tx_queue_stopped().  Without the
304          * memory barrier, there is a small possibility that
305          * start_xmit() will miss it and cause the queue to be stopped
306          * forever.
307          * On the other hand we need an rmb() here to ensure the proper
308          * ordering of bit testing in the following
309          * netif_tx_queue_stopped(txq) call.
310          */
311         smp_mb();
312
313         if (unlikely(netif_tx_queue_stopped(txq))) {
314                 /* Taking tx_lock() is needed to prevent re-enabling the queue
315                  * while it's empty. This could happen if rx_action() gets
316                  * suspended in bnx2x_tx_int() after the condition before
317                  * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
318                  *
319                  * stops the queue->sees fresh tx_bd_cons->releases the queue->
320                  * sends some packets consuming the whole queue again->
321                  * stops the queue
322                  */
323
324                 __netif_tx_lock(txq, smp_processor_id());
325
326                 if ((netif_tx_queue_stopped(txq)) &&
327                     (bp->state == BNX2X_STATE_OPEN) &&
328                     (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
329                         netif_tx_wake_queue(txq);
330
331                 __netif_tx_unlock(txq);
332         }
333         return 0;
334 }
335
336 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
337                                              u16 idx)
338 {
339         u16 last_max = fp->last_max_sge;
340
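        /* SUB_S16() subtracts as signed 16-bit values, so the comparison
         * below stays correct when the SGE index wraps around (classic
         * serial-number arithmetic).
         */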
341         if (SUB_S16(idx, last_max) > 0)
342                 fp->last_max_sge = idx;
343 }
344
345 static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
346                                          u16 sge_len,
347                                          struct eth_end_agg_rx_cqe *cqe)
348 {
349         struct bnx2x *bp = fp->bp;
350         u16 last_max, last_elem, first_elem;
351         u16 delta = 0;
352         u16 i;
353
354         if (!sge_len)
355                 return;
356
357         /* First mark all used pages */
358         for (i = 0; i < sge_len; i++)
359                 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
360                         RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
361
362         DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
363            sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
364
365         /* Here we assume that the last SGE index is the biggest */
366         prefetch((void *)(fp->sge_mask));
367         bnx2x_update_last_max_sge(fp,
368                 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
369
370         last_max = RX_SGE(fp->last_max_sge);
371         last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
372         first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
373
374         /* If ring is not full */
375         if (last_elem + 1 != first_elem)
376                 last_elem++;
377
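        /* Each sge_mask element tracks BIT_VEC64_ELEM_SZ SGEs. The producer
         * may only be advanced over elements whose bits have all been
         * cleared (fully consumed pages); those elements are re-armed to the
         * all-ones mask while 'delta' accumulates the advance.
         */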
378         /* Now update the prod */
379         for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
380                 if (likely(fp->sge_mask[i]))
381                         break;
382
383                 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
384                 delta += BIT_VEC64_ELEM_SZ;
385         }
386
387         if (delta > 0) {
388                 fp->rx_sge_prod += delta;
389                 /* clear page-end entries */
390                 bnx2x_clear_sge_mask_next_elems(fp);
391         }
392
393         DP(NETIF_MSG_RX_STATUS,
394            "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
395            fp->last_max_sge, fp->rx_sge_prod);
396 }
397
398 /* Get Toeplitz hash value in the skb using the value from the
399  * CQE (calculated by HW).
400  */
401 static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
402                             const struct eth_fast_path_rx_cqe *cqe,
403                             enum pkt_hash_types *rxhash_type)
404 {
405         /* Get Toeplitz hash from CQE */
406         if ((bp->dev->features & NETIF_F_RXHASH) &&
407             (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
408                 enum eth_rss_hash_type htype;
409
410                 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
411                 *rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
412                                 (htype == TCP_IPV6_HASH_TYPE)) ?
413                                PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;
414
415                 return le32_to_cpu(cqe->rss_hash_result);
416         }
417         *rxhash_type = PKT_HASH_TYPE_NONE;
418         return 0;
419 }
420
421 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
422                             u16 cons, u16 prod,
423                             struct eth_fast_path_rx_cqe *cqe)
424 {
425         struct bnx2x *bp = fp->bp;
426         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
427         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
428         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
429         dma_addr_t mapping;
430         struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
431         struct sw_rx_bd *first_buf = &tpa_info->first_buf;
432
433         /* print error if current state != stop */
434         if (tpa_info->tpa_state != BNX2X_TPA_STOP)
435                 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
436
437         /* Try to map an empty data buffer from the aggregation info  */
438         mapping = dma_map_single(&bp->pdev->dev,
439                                  first_buf->data + NET_SKB_PAD,
440                                  fp->rx_buf_size, DMA_FROM_DEVICE);
441         /*
442          *  ...if it fails - move the skb from the consumer to the producer
443          *  and set the current aggregation state as ERROR to drop it
444          *  when TPA_STOP arrives.
445          */
446
447         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
448                 /* Move the BD from the consumer to the producer */
449                 bnx2x_reuse_rx_data(fp, cons, prod);
450                 tpa_info->tpa_state = BNX2X_TPA_ERROR;
451                 return;
452         }
453
454         /* move empty data from pool to prod */
455         prod_rx_buf->data = first_buf->data;
456         dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
457         /* point prod_bd to new data */
458         prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
459         prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
460
461         /* move partial skb from cons to pool (don't unmap yet) */
462         *first_buf = *cons_rx_buf;
463
464         /* mark bin state as START */
465         tpa_info->parsing_flags =
466                 le16_to_cpu(cqe->pars_flags.flags);
467         tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
468         tpa_info->tpa_state = BNX2X_TPA_START;
469         tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
470         tpa_info->placement_offset = cqe->placement_offset;
471         tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
472         if (fp->mode == TPA_MODE_GRO) {
473                 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
474                 tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
475                 tpa_info->gro_size = gro_size;
476         }
477
478 #ifdef BNX2X_STOP_ON_ERROR
479         fp->tpa_queue_used |= (1 << queue);
480 #ifdef _ASM_GENERIC_INT_L64_H
481         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
482 #else
483         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
484 #endif
485            fp->tpa_queue_used);
486 #endif
487 }
488
489 /* Timestamp option length allowed for TPA aggregation:
490  *
491  *              nop nop kind length echo val
492  */
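/* i.e. 1 (nop) + 1 (nop) + 1 (kind) + 1 (length) + 4 (TS value) +
 * 4 (TS echo reply) = 12 bytes, per the TCP timestamps option layout.
 */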
493 #define TPA_TSTAMP_OPT_LEN      12
494 /**
495  * bnx2x_set_gro_params - compute GRO values
496  *
497  * @skb:                packet skb
498  * @parsing_flags:      parsing flags from the START CQE
499  * @len_on_bd:          total length of the first packet for the
500  *                      aggregation.
501  * @pkt_len:            length of all segments
502  *
503  * Computes an approximate MSS value for this aggregation from its
504  * first packet, as well as the number of coalesced segments and
505  * the gso_type.
506  */
507 static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
508                                  u16 len_on_bd, unsigned int pkt_len,
509                                  u16 num_of_coalesced_segs)
510 {
511         /* TPA aggregation won't have either IP options or TCP options
512          * other than timestamp or IPv6 extension headers.
513          */
514         u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
515
516         if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
517             PRS_FLAG_OVERETH_IPV6) {
518                 hdrs_len += sizeof(struct ipv6hdr);
519                 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
520         } else {
521                 hdrs_len += sizeof(struct iphdr);
522                 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
523         }
524
525         /* Check if there was a TCP timestamp; if there was, it is always
526          * 12 bytes long: nop nop kind length echo val.
527          *
528          * Otherwise FW would close the aggregation.
529          */
530         if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
531                 hdrs_len += TPA_TSTAMP_OPT_LEN;
532
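        /* e.g. for an IPv4 aggregation with timestamps this gives
         * 14 (ETH_HLEN) + 20 (tcphdr) + 20 (iphdr) + 12 (timestamps) = 66
         * bytes of headers, so gso_size below becomes len_on_bd - 66.
         */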
533         skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
534
535         /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
536          * to skb_shinfo(skb)->gso_segs
537          */
538         NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
539 }
540
541 static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
542                               u16 index, gfp_t gfp_mask)
543 {
544         struct page *page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
545         struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
546         struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
547         dma_addr_t mapping;
548
549         if (unlikely(page == NULL)) {
550                 BNX2X_ERR("Can't alloc sge\n");
551                 return -ENOMEM;
552         }
553
554         mapping = dma_map_page(&bp->pdev->dev, page, 0,
555                                SGE_PAGES, DMA_FROM_DEVICE);
556         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
557                 __free_pages(page, PAGES_PER_SGE_SHIFT);
558                 BNX2X_ERR("Can't map sge\n");
559                 return -ENOMEM;
560         }
561
562         sw_buf->page = page;
563         dma_unmap_addr_set(sw_buf, mapping, mapping);
564
565         sge->addr_hi = cpu_to_le32(U64_HI(mapping));
566         sge->addr_lo = cpu_to_le32(U64_LO(mapping));
567
568         return 0;
569 }
570
571 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
572                                struct bnx2x_agg_info *tpa_info,
573                                u16 pages,
574                                struct sk_buff *skb,
575                                struct eth_end_agg_rx_cqe *cqe,
576                                u16 cqe_idx)
577 {
578         struct sw_rx_page *rx_pg, old_rx_pg;
579         u32 i, frag_len, frag_size;
580         int err, j, frag_id = 0;
581         u16 len_on_bd = tpa_info->len_on_bd;
582         u16 full_page = 0, gro_size = 0;
583
584         frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
585
586         if (fp->mode == TPA_MODE_GRO) {
587                 gro_size = tpa_info->gro_size;
588                 full_page = tpa_info->full_page;
589         }
590
591         /* This is needed in order to enable forwarding support */
592         if (frag_size)
593                 bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
594                                      le16_to_cpu(cqe->pkt_len),
595                                      le16_to_cpu(cqe->num_of_coalesced_segs));
596
597 #ifdef BNX2X_STOP_ON_ERROR
598         if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
599                 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
600                           pages, cqe_idx);
601                 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
602                 bnx2x_panic();
603                 return -EINVAL;
604         }
605 #endif
606
607         /* Run through the SGL and compose the fragmented skb */
608         for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
609                 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
610
611                 /* FW gives the indices of the SGE as if the ring is an array
612                    (meaning that "next" element will consume 2 indices) */
613                 if (fp->mode == TPA_MODE_GRO)
614                         frag_len = min_t(u32, frag_size, (u32)full_page);
615                 else /* LRO */
616                         frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);
617
618                 rx_pg = &fp->rx_page_ring[sge_idx];
619                 old_rx_pg = *rx_pg;
620
621                 /* If we fail to allocate a substitute page, we simply stop
622                    where we are and drop the whole packet */
623                 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
624                 if (unlikely(err)) {
625                         bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
626                         return err;
627                 }
628
629                 /* Unmap the page as we're going to pass it to the stack */
630                 dma_unmap_page(&bp->pdev->dev,
631                                dma_unmap_addr(&old_rx_pg, mapping),
632                                SGE_PAGES, DMA_FROM_DEVICE);
633                 /* Add one frag and update the appropriate fields in the skb */
634                 if (fp->mode == TPA_MODE_LRO)
635                         skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
636                 else { /* GRO */
637                         int rem;
638                         int offset = 0;
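                        /* Split this SGE page into gro_size-sized frags.
                         * Every frag beyond the first needs its own page
                         * reference (get_page()); the first one reuses the
                         * reference taken when the page was allocated for
                         * the ring.
                         */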
639                         for (rem = frag_len; rem > 0; rem -= gro_size) {
640                                 int len = rem > gro_size ? gro_size : rem;
641                                 skb_fill_page_desc(skb, frag_id++,
642                                                    old_rx_pg.page, offset, len);
643                                 if (offset)
644                                         get_page(old_rx_pg.page);
645                                 offset += len;
646                         }
647                 }
648
649                 skb->data_len += frag_len;
650                 skb->truesize += SGE_PAGES;
651                 skb->len += frag_len;
652
653                 frag_size -= frag_len;
654         }
655
656         return 0;
657 }
658
659 static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
660 {
661         if (fp->rx_frag_size)
662                 put_page(virt_to_head_page(data));
663         else
664                 kfree(data);
665 }
666
667 static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
668 {
669         if (fp->rx_frag_size) {
670                 /* GFP_KERNEL allocations are used only during initialization */
671                 if (unlikely(gfp_mask & __GFP_WAIT))
672                         return (void *)__get_free_page(gfp_mask);
673
674                 return netdev_alloc_frag(fp->rx_frag_size);
675         }
676
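        /* A zero rx_frag_size means the receive buffer is too large for the
         * page-fragment allocator (typically more than a page including
         * headroom and shared info), so fall back to kmalloc().
         */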
677         return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
678 }
679
680 #ifdef CONFIG_INET
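/* The helpers below seed th->check with the TCP pseudo-header checksum
 * before tcp_gro_complete() is called, so the FW-aggregated super-packet
 * looks like a regular GSO skb to the stack (e.g. it can be resegmented
 * and checksummed when forwarded).
 */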
681 static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
682 {
683         const struct iphdr *iph = ip_hdr(skb);
684         struct tcphdr *th;
685
686         skb_set_transport_header(skb, sizeof(struct iphdr));
687         th = tcp_hdr(skb);
688
689         th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
690                                   iph->saddr, iph->daddr, 0);
691 }
692
693 static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
694 {
695         struct ipv6hdr *iph = ipv6_hdr(skb);
696         struct tcphdr *th;
697
698         skb_set_transport_header(skb, sizeof(struct ipv6hdr));
699         th = tcp_hdr(skb);
700
701         th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
702                                   &iph->saddr, &iph->daddr, 0);
703 }
704
705 static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
706                             void (*gro_func)(struct bnx2x*, struct sk_buff*))
707 {
708         skb_set_network_header(skb, 0);
709         gro_func(bp, skb);
710         tcp_gro_complete(skb);
711 }
712 #endif
713
714 static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
715                                struct sk_buff *skb)
716 {
717 #ifdef CONFIG_INET
718         if (skb_shinfo(skb)->gso_size) {
719                 switch (be16_to_cpu(skb->protocol)) {
720                 case ETH_P_IP:
721                         bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
722                         break;
723                 case ETH_P_IPV6:
724                         bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
725                         break;
726                 default:
727                         BNX2X_ERR("Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
728                                   be16_to_cpu(skb->protocol));
729                 }
730         }
731 #endif
732         skb_record_rx_queue(skb, fp->rx_queue);
733         napi_gro_receive(&fp->napi, skb);
734 }
735
736 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
737                            struct bnx2x_agg_info *tpa_info,
738                            u16 pages,
739                            struct eth_end_agg_rx_cqe *cqe,
740                            u16 cqe_idx)
741 {
742         struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
743         u8 pad = tpa_info->placement_offset;
744         u16 len = tpa_info->len_on_bd;
745         struct sk_buff *skb = NULL;
746         u8 *new_data, *data = rx_buf->data;
747         u8 old_tpa_state = tpa_info->tpa_state;
748
749         tpa_info->tpa_state = BNX2X_TPA_STOP;
750
751         /* If there was an error during the handling of the TPA_START -
752          * drop this aggregation.
753          */
754         if (old_tpa_state == BNX2X_TPA_ERROR)
755                 goto drop;
756
757         /* Try to allocate the new data */
758         new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
759         /* Unmap skb in the pool anyway, as we are going to change
760            pool entry status to BNX2X_TPA_STOP even if new skb allocation
761            fails. */
762         dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
763                          fp->rx_buf_size, DMA_FROM_DEVICE);
764         if (likely(new_data))
765                 skb = build_skb(data, fp->rx_frag_size);
766
767         if (likely(skb)) {
768 #ifdef BNX2X_STOP_ON_ERROR
769                 if (pad + len > fp->rx_buf_size) {
770                         BNX2X_ERR("skb_put is about to fail...  pad %d  len %d  rx_buf_size %d\n",
771                                   pad, len, fp->rx_buf_size);
772                         bnx2x_panic();
773                         return;
774                 }
775 #endif
776
777                 skb_reserve(skb, pad + NET_SKB_PAD);
778                 skb_put(skb, len);
779                 skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);
780
781                 skb->protocol = eth_type_trans(skb, bp->dev);
782                 skb->ip_summed = CHECKSUM_UNNECESSARY;
783
784                 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
785                                          skb, cqe, cqe_idx)) {
786                         if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
787                                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
788                         bnx2x_gro_receive(bp, fp, skb);
789                 } else {
790                         DP(NETIF_MSG_RX_STATUS,
791                            "Failed to allocate new pages - dropping packet!\n");
792                         dev_kfree_skb_any(skb);
793                 }
794
795                 /* put new data in bin */
796                 rx_buf->data = new_data;
797
798                 return;
799         }
800         if (new_data)
801                 bnx2x_frag_free(fp, new_data);
802 drop:
803         /* drop the packet and keep the buffer in the bin */
804         DP(NETIF_MSG_RX_STATUS,
805            "Failed to allocate or map a new skb - dropping packet!\n");
806         bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
807 }
808
809 static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
810                                u16 index, gfp_t gfp_mask)
811 {
812         u8 *data;
813         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
814         struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
815         dma_addr_t mapping;
816
817         data = bnx2x_frag_alloc(fp, gfp_mask);
818         if (unlikely(data == NULL))
819                 return -ENOMEM;
820
821         mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
822                                  fp->rx_buf_size,
823                                  DMA_FROM_DEVICE);
824         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
825                 bnx2x_frag_free(fp, data);
826                 BNX2X_ERR("Can't map rx data\n");
827                 return -ENOMEM;
828         }
829
830         rx_buf->data = data;
831         dma_unmap_addr_set(rx_buf, mapping, mapping);
832
833         rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
834         rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
835
836         return 0;
837 }
838
839 static
840 void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
841                                  struct bnx2x_fastpath *fp,
842                                  struct bnx2x_eth_q_stats *qstats)
843 {
844         /* Do nothing if no L4 csum validation was done.
845          * We do not check whether IP csum was validated. For IPv4 we assume
846          * that if the card got as far as validating the L4 csum, it also
847          * validated the IP csum. IPv6 has no IP csum.
848          */
849         if (cqe->fast_path_cqe.status_flags &
850             ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
851                 return;
852
853         /* If L4 validation was done, check if an error was found. */
854
855         if (cqe->fast_path_cqe.type_error_flags &
856             (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
857              ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
858                 qstats->hw_csum_err++;
859         else
860                 skb->ip_summed = CHECKSUM_UNNECESSARY;
861 }
862
863 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
864 {
865         struct bnx2x *bp = fp->bp;
866         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
867         u16 sw_comp_cons, sw_comp_prod;
868         int rx_pkt = 0;
869         union eth_rx_cqe *cqe;
870         struct eth_fast_path_rx_cqe *cqe_fp;
871
872 #ifdef BNX2X_STOP_ON_ERROR
873         if (unlikely(bp->panic))
874                 return 0;
875 #endif
876         if (budget <= 0)
877                 return rx_pkt;
878
879         bd_cons = fp->rx_bd_cons;
880         bd_prod = fp->rx_bd_prod;
881         bd_prod_fw = bd_prod;
882         sw_comp_cons = fp->rx_comp_cons;
883         sw_comp_prod = fp->rx_comp_prod;
884
885         comp_ring_cons = RCQ_BD(sw_comp_cons);
886         cqe = &fp->rx_comp_ring[comp_ring_cons];
887         cqe_fp = &cqe->fast_path_cqe;
888
889         DP(NETIF_MSG_RX_STATUS,
890            "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);
891
892         while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
893                 struct sw_rx_bd *rx_buf = NULL;
894                 struct sk_buff *skb;
895                 u8 cqe_fp_flags;
896                 enum eth_rx_cqe_type cqe_fp_type;
897                 u16 len, pad, queue;
898                 u8 *data;
899                 u32 rxhash;
900                 enum pkt_hash_types rxhash_type;
901
902 #ifdef BNX2X_STOP_ON_ERROR
903                 if (unlikely(bp->panic))
904                         return 0;
905 #endif
906
907                 bd_prod = RX_BD(bd_prod);
908                 bd_cons = RX_BD(bd_cons);
909
910                 /* An rmb() is required to ensure that the CQE is not read
911                  * before it is written by the adapter DMA.  PCI ordering
912                  * rules will make sure the other fields are written before
913                  * the marker at the end of struct eth_fast_path_rx_cqe,
914                  * but without rmb() a weakly ordered processor can process
915                  * stale data.  Without the barrier the TPA state machine
916                  * might enter an inconsistent state and the kernel stack
917                  * might be given an incorrect packet description - these
918                  * can lead to various kernel crashes.
919                  */
920                 rmb();
921
922                 cqe_fp_flags = cqe_fp->type_error_flags;
923                 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
924
925                 DP(NETIF_MSG_RX_STATUS,
926                    "CQE type %x  err %x  status %x  queue %x  vlan %x  len %u\n",
927                    CQE_TYPE(cqe_fp_flags),
928                    cqe_fp_flags, cqe_fp->status_flags,
929                    le32_to_cpu(cqe_fp->rss_hash_result),
930                    le16_to_cpu(cqe_fp->vlan_tag),
931                    le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
932
933                 /* is this a slowpath msg? */
934                 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
935                         bnx2x_sp_event(fp, cqe);
936                         goto next_cqe;
937                 }
938
939                 rx_buf = &fp->rx_buf_ring[bd_cons];
940                 data = rx_buf->data;
941
942                 if (!CQE_TYPE_FAST(cqe_fp_type)) {
943                         struct bnx2x_agg_info *tpa_info;
944                         u16 frag_size, pages;
945 #ifdef BNX2X_STOP_ON_ERROR
946                         /* sanity check */
947                         if (fp->disable_tpa &&
948                             (CQE_TYPE_START(cqe_fp_type) ||
949                              CQE_TYPE_STOP(cqe_fp_type)))
950                                 BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
951                                           CQE_TYPE(cqe_fp_type));
952 #endif
953
954                         if (CQE_TYPE_START(cqe_fp_type)) {
955                                 u16 queue = cqe_fp->queue_index;
956                                 DP(NETIF_MSG_RX_STATUS,
957                                    "calling tpa_start on queue %d\n",
958                                    queue);
959
960                                 bnx2x_tpa_start(fp, queue,
961                                                 bd_cons, bd_prod,
962                                                 cqe_fp);
963
964                                 goto next_rx;
965                         }
966                         queue = cqe->end_agg_cqe.queue_index;
967                         tpa_info = &fp->tpa_info[queue];
968                         DP(NETIF_MSG_RX_STATUS,
969                            "calling tpa_stop on queue %d\n",
970                            queue);
971
972                         frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
973                                     tpa_info->len_on_bd;
974
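                        /* Compute how many SGE buffers hold the rest of the
                         * aggregation: frag_size is the total aggregated
                         * length minus what already sits on the first BD.
                         */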
975                         if (fp->mode == TPA_MODE_GRO)
976                                 pages = (frag_size + tpa_info->full_page - 1) /
977                                          tpa_info->full_page;
978                         else
979                                 pages = SGE_PAGE_ALIGN(frag_size) >>
980                                         SGE_PAGE_SHIFT;
981
982                         bnx2x_tpa_stop(bp, fp, tpa_info, pages,
983                                        &cqe->end_agg_cqe, comp_ring_cons);
984 #ifdef BNX2X_STOP_ON_ERROR
985                         if (bp->panic)
986                                 return 0;
987 #endif
988
989                         bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
990                         goto next_cqe;
991                 }
992                 /* non TPA */
993                 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
994                 pad = cqe_fp->placement_offset;
995                 dma_sync_single_for_cpu(&bp->pdev->dev,
996                                         dma_unmap_addr(rx_buf, mapping),
997                                         pad + RX_COPY_THRESH,
998                                         DMA_FROM_DEVICE);
999                 pad += NET_SKB_PAD;
1000                 prefetch(data + pad); /* speed up eth_type_trans() */
1001                 /* is this an error packet? */
1002                 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1003                         DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1004                            "ERROR  flags %x  rx packet %u\n",
1005                            cqe_fp_flags, sw_comp_cons);
1006                         bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
1007                         goto reuse_rx;
1008                 }
1009
1010                 /* Since we don't have a jumbo ring
1011                  * copy small packets if mtu > 1500
1012                  */
1013                 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1014                     (len <= RX_COPY_THRESH)) {
1015                         skb = netdev_alloc_skb_ip_align(bp->dev, len);
1016                         if (skb == NULL) {
1017                                 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1018                                    "ERROR  packet dropped because of alloc failure\n");
1019                                 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1020                                 goto reuse_rx;
1021                         }
1022                         memcpy(skb->data, data + pad, len);
1023                         bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1024                 } else {
1025                         if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
1026                                                        GFP_ATOMIC) == 0)) {
1027                                 dma_unmap_single(&bp->pdev->dev,
1028                                                  dma_unmap_addr(rx_buf, mapping),
1029                                                  fp->rx_buf_size,
1030                                                  DMA_FROM_DEVICE);
1031                                 skb = build_skb(data, fp->rx_frag_size);
1032                                 if (unlikely(!skb)) {
1033                                         bnx2x_frag_free(fp, data);
1034                                         bnx2x_fp_qstats(bp, fp)->
1035                                                         rx_skb_alloc_failed++;
1036                                         goto next_rx;
1037                                 }
1038                                 skb_reserve(skb, pad);
1039                         } else {
1040                                 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1041                                    "ERROR  packet dropped because of alloc failure\n");
1042                                 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1043 reuse_rx:
1044                                 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1045                                 goto next_rx;
1046                         }
1047                 }
1048
1049                 skb_put(skb, len);
1050                 skb->protocol = eth_type_trans(skb, bp->dev);
1051
1052                 /* Set Toeplitz hash for a non-LRO skb */
1053                 rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
1054                 skb_set_hash(skb, rxhash, rxhash_type);
1055
1056                 skb_checksum_none_assert(skb);
1057
1058                 if (bp->dev->features & NETIF_F_RXCSUM)
1059                         bnx2x_csum_validate(skb, cqe, fp,
1060                                             bnx2x_fp_qstats(bp, fp));
1061
1062                 skb_record_rx_queue(skb, fp->rx_queue);
1063
1064                 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
1065                     PARSING_FLAGS_VLAN)
1066                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1067                                                le16_to_cpu(cqe_fp->vlan_tag));
1068
1069                 skb_mark_napi_id(skb, &fp->napi);
1070
1071                 if (bnx2x_fp_ll_polling(fp))
1072                         netif_receive_skb(skb);
1073                 else
1074                         napi_gro_receive(&fp->napi, skb);
1075 next_rx:
1076                 rx_buf->data = NULL;
1077
1078                 bd_cons = NEXT_RX_IDX(bd_cons);
1079                 bd_prod = NEXT_RX_IDX(bd_prod);
1080                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1081                 rx_pkt++;
1082 next_cqe:
1083                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1084                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1085
1086                 /* mark CQE as free */
1087                 BNX2X_SEED_CQE(cqe_fp);
1088
1089                 if (rx_pkt == budget)
1090                         break;
1091
1092                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1093                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1094                 cqe_fp = &cqe->fast_path_cqe;
1095         } /* while */
1096
1097         fp->rx_bd_cons = bd_cons;
1098         fp->rx_bd_prod = bd_prod_fw;
1099         fp->rx_comp_cons = sw_comp_cons;
1100         fp->rx_comp_prod = sw_comp_prod;
1101
1102         /* Update producers */
1103         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1104                              fp->rx_sge_prod);
1105
1106         fp->rx_pkt += rx_pkt;
1107         fp->rx_calls++;
1108
1109         return rx_pkt;
1110 }
1111
1112 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1113 {
1114         struct bnx2x_fastpath *fp = fp_cookie;
1115         struct bnx2x *bp = fp->bp;
1116         u8 cos;
1117
1118         DP(NETIF_MSG_INTR,
1119            "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
1120            fp->index, fp->fw_sb_id, fp->igu_sb_id);
1121
1122         bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1123
1124 #ifdef BNX2X_STOP_ON_ERROR
1125         if (unlikely(bp->panic))
1126                 return IRQ_HANDLED;
1127 #endif
1128
1129         /* Handle Rx and Tx according to MSI-X vector */
1130         for_each_cos_in_tx_queue(fp, cos)
1131                 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1132
1133         prefetch(&fp->sb_running_index[SM_RX_ID]);
1134         napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1135
1136         return IRQ_HANDLED;
1137 }
1138
1139 /* HW Lock for shared dual port PHYs */
1140 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1141 {
1142         mutex_lock(&bp->port.phy_mutex);
1143
1144         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1145 }
1146
1147 void bnx2x_release_phy_lock(struct bnx2x *bp)
1148 {
1149         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1150
1151         mutex_unlock(&bp->port.phy_mutex);
1152 }
1153
1154 /* calculates MF speed according to current linespeed and MF configuration */
1155 u16 bnx2x_get_mf_speed(struct bnx2x *bp)
1156 {
1157         u16 line_speed = bp->link_vars.line_speed;
1158         if (IS_MF(bp)) {
1159                 u16 maxCfg = bnx2x_extract_max_cfg(bp,
1160                                                    bp->mf_config[BP_VN(bp)]);
1161
1162                 /* Calculate the current MAX line speed limit for the MF
1163                  * devices
1164                  */
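                /* For illustration: with line_speed = 10000 Mbps and
                 * maxCfg = 50, SI mode reports 10000 * 50 / 100 = 5000 Mbps,
                 * while SD mode caps the speed at 50 * 100 = 5000 Mbps.
                 */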
1165                 if (IS_MF_SI(bp))
1166                         line_speed = (line_speed * maxCfg) / 100;
1167                 else { /* SD mode */
1168                         u16 vn_max_rate = maxCfg * 100;
1169
1170                         if (vn_max_rate < line_speed)
1171                                 line_speed = vn_max_rate;
1172                 }
1173         }
1174
1175         return line_speed;
1176 }
1177
1178 /**
1179  * bnx2x_fill_report_data - fill link report data to report
1180  *
1181  * @bp:         driver handle
1182  * @data:       link state to update
1183  *
1184  * It uses non-atomic bit operations because it is called under the mutex.
1185  */
1186 static void bnx2x_fill_report_data(struct bnx2x *bp,
1187                                    struct bnx2x_link_report_data *data)
1188 {
1189         u16 line_speed = bnx2x_get_mf_speed(bp);
1190
1191         memset(data, 0, sizeof(*data));
1192
1193         /* Fill the report data: effective line speed */
1194         data->line_speed = line_speed;
1195
1196         /* Link is down */
1197         if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1198                 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1199                           &data->link_report_flags);
1200
1201         /* Full DUPLEX */
1202         if (bp->link_vars.duplex == DUPLEX_FULL)
1203                 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
1204
1205         /* Rx Flow Control is ON */
1206         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1207                 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
1208
1209         /* Tx Flow Control is ON */
1210         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1211                 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
1212 }
1213
1214 /**
1215  * bnx2x_link_report - report link status to OS.
1216  *
1217  * @bp:         driver handle
1218  *
1219  * Calls __bnx2x_link_report() under the same locking scheme
1220  * as the link/PHY state managing code to ensure consistent link
1221  * reporting.
1222  */
1223
1224 void bnx2x_link_report(struct bnx2x *bp)
1225 {
1226         bnx2x_acquire_phy_lock(bp);
1227         __bnx2x_link_report(bp);
1228         bnx2x_release_phy_lock(bp);
1229 }
1230
1231 /**
1232  * __bnx2x_link_report - report link status to OS.
1233  *
1234  * @bp:         driver handle
1235  *
1236  * Non-atomic implementation.
1237  * Should be called under the phy_lock.
1238  */
1239 void __bnx2x_link_report(struct bnx2x *bp)
1240 {
1241         struct bnx2x_link_report_data cur_data;
1242
1243         /* reread mf_cfg */
1244         if (IS_PF(bp) && !CHIP_IS_E1(bp))
1245                 bnx2x_read_mf_cfg(bp);
1246
1247         /* Read the current link report info */
1248         bnx2x_fill_report_data(bp, &cur_data);
1249
1250         /* Don't report link down or exactly the same link status twice */
1251         if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1252             (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1253                       &bp->last_reported_link.link_report_flags) &&
1254              test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1255                       &cur_data.link_report_flags)))
1256                 return;
1257
1258         bp->link_cnt++;
1259
1260         /* We are going to report new link parameters now -
1261          * remember the current data for the next time.
1262          */
1263         memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1264
1265         if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1266                      &cur_data.link_report_flags)) {
1267                 netif_carrier_off(bp->dev);
1268                 netdev_err(bp->dev, "NIC Link is Down\n");
1269                 return;
1270         } else {
1271                 const char *duplex;
1272                 const char *flow;
1273
1274                 netif_carrier_on(bp->dev);
1275
1276                 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1277                                        &cur_data.link_report_flags))
1278                         duplex = "full";
1279                 else
1280                         duplex = "half";
1281
1282                 /* Handle the FC at the end so that only these flags could
1283                  * possibly be set. This way we can easily check whether any
1284                  * FC is enabled at all.
1285                  */
1286                 if (cur_data.link_report_flags) {
1287                         if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1288                                      &cur_data.link_report_flags)) {
1289                                 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1290                                      &cur_data.link_report_flags))
1291                                         flow = "ON - receive & transmit";
1292                                 else
1293                                         flow = "ON - receive";
1294                         } else {
1295                                 flow = "ON - transmit";
1296                         }
1297                 } else {
1298                         flow = "none";
1299                 }
1300                 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1301                             cur_data.line_speed, duplex, flow);
1302         }
1303 }
1304
1305 static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1306 {
1307         int i;
1308
1309         for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1310                 struct eth_rx_sge *sge;
1311
1312                 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1313                 sge->addr_hi =
1314                         cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1315                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1316
1317                 sge->addr_lo =
1318                         cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1319                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1320         }
1321 }
1322
1323 static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1324                                 struct bnx2x_fastpath *fp, int last)
1325 {
1326         int i;
1327
1328         for (i = 0; i < last; i++) {
1329                 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1330                 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1331                 u8 *data = first_buf->data;
1332
1333                 if (data == NULL) {
1334                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1335                         continue;
1336                 }
1337                 if (tpa_info->tpa_state == BNX2X_TPA_START)
1338                         dma_unmap_single(&bp->pdev->dev,
1339                                          dma_unmap_addr(first_buf, mapping),
1340                                          fp->rx_buf_size, DMA_FROM_DEVICE);
1341                 bnx2x_frag_free(fp, data);
1342                 first_buf->data = NULL;
1343         }
1344 }
1345
1346 void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1347 {
1348         int j;
1349
1350         for_each_rx_queue_cnic(bp, j) {
1351                 struct bnx2x_fastpath *fp = &bp->fp[j];
1352
1353                 fp->rx_bd_cons = 0;
1354
1355                 /* Activate BD ring */
1356                 /* Warning!
1357                  * this will generate an interrupt (to the TSTORM);
1358                  * it must only be done after the chip is initialized
1359                  */
1360                 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1361                                      fp->rx_sge_prod);
1362         }
1363 }
1364
1365 void bnx2x_init_rx_rings(struct bnx2x *bp)
1366 {
1367         int func = BP_FUNC(bp);
1368         u16 ring_prod;
1369         int i, j;
1370
1371         /* Allocate TPA resources */
1372         for_each_eth_queue(bp, j) {
1373                 struct bnx2x_fastpath *fp = &bp->fp[j];
1374
1375                 DP(NETIF_MSG_IFUP,
1376                    "mtu %d  rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1377
1378                 if (!fp->disable_tpa) {
1379                         /* Fill the per-aggregation pool */
1380                         for (i = 0; i < MAX_AGG_QS(bp); i++) {
1381                                 struct bnx2x_agg_info *tpa_info =
1382                                         &fp->tpa_info[i];
1383                                 struct sw_rx_bd *first_buf =
1384                                         &tpa_info->first_buf;
1385
1386                                 first_buf->data =
1387                                         bnx2x_frag_alloc(fp, GFP_KERNEL);
1388                                 if (!first_buf->data) {
1389                                         BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1390                                                   j);
1391                                         bnx2x_free_tpa_pool(bp, fp, i);
1392                                         fp->disable_tpa = 1;
1393                                         break;
1394                                 }
1395                                 dma_unmap_addr_set(first_buf, mapping, 0);
1396                                 tpa_info->tpa_state = BNX2X_TPA_STOP;
1397                         }
1398
1399                         /* "next page" elements initialization */
1400                         bnx2x_set_next_page_sgl(fp);
1401
1402                         /* set SGEs bit mask */
1403                         bnx2x_init_sge_ring_bit_mask(fp);
1404
1405                         /* Allocate SGEs and initialize the ring elements */
1406                         for (i = 0, ring_prod = 0;
1407                              i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1408
1409                                 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
1410                                                        GFP_KERNEL) < 0) {
1411                                         BNX2X_ERR("was only able to allocate %d rx sges\n",
1412                                                   i);
1413                                         BNX2X_ERR("disabling TPA for queue[%d]\n",
1414                                                   j);
1415                                         /* Cleanup already allocated elements */
1416                                         bnx2x_free_rx_sge_range(bp, fp,
1417                                                                 ring_prod);
1418                                         bnx2x_free_tpa_pool(bp, fp,
1419                                                             MAX_AGG_QS(bp));
1420                                         fp->disable_tpa = 1;
1421                                         ring_prod = 0;
1422                                         break;
1423                                 }
1424                                 ring_prod = NEXT_SGE_IDX(ring_prod);
1425                         }
1426
1427                         fp->rx_sge_prod = ring_prod;
1428                 }
1429         }
1430
1431         for_each_eth_queue(bp, j) {
1432                 struct bnx2x_fastpath *fp = &bp->fp[j];
1433
1434                 fp->rx_bd_cons = 0;
1435
1436                 /* Activate BD ring */
1437                 /* Warning!
1438                  * this will generate an interrupt (to the TSTORM);
1439                  * it must only be done after the chip is initialized
1440                  */
1441                 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1442                                      fp->rx_sge_prod);
1443
1444                 if (j != 0)
1445                         continue;
1446
1447                 if (CHIP_IS_E1(bp)) {
1448                         REG_WR(bp, BAR_USTRORM_INTMEM +
1449                                USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1450                                U64_LO(fp->rx_comp_mapping));
1451                         REG_WR(bp, BAR_USTRORM_INTMEM +
1452                                USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1453                                U64_HI(fp->rx_comp_mapping));
1454                 }
1455         }
1456 }
1457
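/* Free every Tx packet still pending on each CoS ring of the fastpath
 * (everything between the packet consumer and producer) and reset the
 * corresponding netdev Tx queue byte/packet accounting.
 */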
1458 static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1459 {
1460         u8 cos;
1461         struct bnx2x *bp = fp->bp;
1462
1463         for_each_cos_in_tx_queue(fp, cos) {
1464                 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1465                 unsigned pkts_compl = 0, bytes_compl = 0;
1466
1467                 u16 sw_prod = txdata->tx_pkt_prod;
1468                 u16 sw_cons = txdata->tx_pkt_cons;
1469
1470                 while (sw_cons != sw_prod) {
1471                         bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1472                                           &pkts_compl, &bytes_compl);
1473                         sw_cons++;
1474                 }
1475
1476                 netdev_tx_reset_queue(
1477                         netdev_get_tx_queue(bp->dev,
1478                                             txdata->txq_index));
1479         }
1480 }
1481
1482 static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1483 {
1484         int i;
1485
1486         for_each_tx_queue_cnic(bp, i) {
1487                 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1488         }
1489 }
1490
1491 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1492 {
1493         int i;
1494
1495         for_each_eth_queue(bp, i) {
1496                 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1497         }
1498 }
1499
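/* Unmap and free all Rx data buffers of a fastpath ring; if the ring was
 * never allocated there is nothing to do, and empty entries are skipped.
 */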
1500 static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1501 {
1502         struct bnx2x *bp = fp->bp;
1503         int i;
1504
1505         /* ring wasn't allocated */
1506         if (fp->rx_buf_ring == NULL)
1507                 return;
1508
1509         for (i = 0; i < NUM_RX_BD; i++) {
1510                 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1511                 u8 *data = rx_buf->data;
1512
1513                 if (data == NULL)
1514                         continue;
1515                 dma_unmap_single(&bp->pdev->dev,
1516                                  dma_unmap_addr(rx_buf, mapping),
1517                                  fp->rx_buf_size, DMA_FROM_DEVICE);
1518
1519                 rx_buf->data = NULL;
1520                 bnx2x_frag_free(fp, data);
1521         }
1522 }
1523
1524 static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1525 {
1526         int j;
1527
1528         for_each_rx_queue_cnic(bp, j) {
1529                 bnx2x_free_rx_bds(&bp->fp[j]);
1530         }
1531 }
1532
1533 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1534 {
1535         int j;
1536
1537         for_each_eth_queue(bp, j) {
1538                 struct bnx2x_fastpath *fp = &bp->fp[j];
1539
1540                 bnx2x_free_rx_bds(fp);
1541
1542                 if (!fp->disable_tpa)
1543                         bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1544         }
1545 }
1546
1547 static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1548 {
1549         bnx2x_free_tx_skbs_cnic(bp);
1550         bnx2x_free_rx_skbs_cnic(bp);
1551 }
1552
1553 void bnx2x_free_skbs(struct bnx2x *bp)
1554 {
1555         bnx2x_free_tx_skbs(bp);
1556         bnx2x_free_rx_skbs(bp);
1557 }
1558
1559 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1560 {
1561         /* load old values */
1562         u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1563
1564         if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1565                 /* leave all but MAX value */
1566                 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1567
1568                 /* set new MAX value */
1569                 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1570                                 & FUNC_MF_CFG_MAX_BW_MASK;
1571
1572                 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1573         }
1574 }
1575
1576 /**
1577  * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1578  *
1579  * @bp:         driver handle
1580  * @nvecs:      number of vectors to be released
1581  */
1582 static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1583 {
1584         int i, offset = 0;
1585
1586         if (nvecs == offset)
1587                 return;
1588
1589         /* VFs don't have a default SB */
1590         if (IS_PF(bp)) {
1591                 free_irq(bp->msix_table[offset].vector, bp->dev);
1592                 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1593                    bp->msix_table[offset].vector);
1594                 offset++;
1595         }
1596
1597         if (CNIC_SUPPORT(bp)) {
1598                 if (nvecs == offset)
1599                         return;
1600                 offset++;
1601         }
1602
1603         for_each_eth_queue(bp, i) {
1604                 if (nvecs == offset)
1605                         return;
1606                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1607                    i, bp->msix_table[offset].vector);
1608
1609                 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1610         }
1611 }
1612
1613 void bnx2x_free_irq(struct bnx2x *bp)
1614 {
1615         if (bp->flags & USING_MSIX_FLAG &&
1616             !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1617                 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1618
1619                 /* vfs don't have a default status block */
1620                 if (IS_PF(bp))
1621                         nvecs++;
1622
1623                 bnx2x_free_msix_irqs(bp, nvecs);
1624         } else {
1625                 free_irq(bp->dev->irq, bp->dev);
1626         }
1627 }
1628
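/* MSI-X vector layout requested below: entry 0 for the slowpath status
 * block (PF only), then one optional entry for CNIC, then one entry per
 * ETH queue.  For example, a PF with CNIC support and 8 ETH queues asks
 * for 1 + 1 + 8 = 10 vectors; if fewer are granted, the number of ETH
 * queues is reduced by the shortfall (or to 1 when only a single vector
 * can be had).
 */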
1629 int bnx2x_enable_msix(struct bnx2x *bp)
1630 {
1631         int msix_vec = 0, i, rc;
1632
1633         /* VFs don't have a default status block */
1634         if (IS_PF(bp)) {
1635                 bp->msix_table[msix_vec].entry = msix_vec;
1636                 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1637                                bp->msix_table[0].entry);
1638                 msix_vec++;
1639         }
1640
1641         /* CNIC requires an MSI-X vector for itself */
1642         if (CNIC_SUPPORT(bp)) {
1643                 bp->msix_table[msix_vec].entry = msix_vec;
1644                 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1645                                msix_vec, bp->msix_table[msix_vec].entry);
1646                 msix_vec++;
1647         }
1648
1649         /* We need separate vectors for ETH queues only (not FCoE) */
1650         for_each_eth_queue(bp, i) {
1651                 bp->msix_table[msix_vec].entry = msix_vec;
1652                 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1653                                msix_vec, msix_vec, i);
1654                 msix_vec++;
1655         }
1656
1657         DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1658            msix_vec);
1659
1660         rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
1661                                    BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
1662         /*
1663          * reconfigure number of tx/rx queues according to available
1664          * MSI-X vectors
1665          */
1666         if (rc == -ENOSPC) {
1667                 /* Get by with single vector */
1668                 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
1669                 if (rc < 0) {
1670                         BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1671                                        rc);
1672                         goto no_msix;
1673                 }
1674
1675                 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1676                 bp->flags |= USING_SINGLE_MSIX_FLAG;
1677
1678                 BNX2X_DEV_INFO("set number of queues to 1\n");
1679                 bp->num_ethernet_queues = 1;
1680                 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1681         } else if (rc < 0) {
1682                 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1683                 goto no_msix;
1684         } else if (rc < msix_vec) {
1685                 /* how many fewer vectors did we get? */
1686                 int diff = msix_vec - rc;
1687
1688                 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1689
1690                 /*
1691                  * decrease number of queues by number of unallocated entries
1692                  */
1693                 bp->num_ethernet_queues -= diff;
1694                 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1695
1696                 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1697                                bp->num_queues);
1698         }
1699
1700         bp->flags |= USING_MSIX_FLAG;
1701
1702         return 0;
1703
1704 no_msix:
1705         /* fall back to INTx if there is not enough memory */
1706         if (rc == -ENOMEM)
1707                 bp->flags |= DISABLE_MSI_FLAG;
1708
1709         return rc;
1710 }
1711
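/* Request the previously enabled MSI-X vectors: the slowpath interrupt
 * (PF only), then one interrupt per ETH queue (the CNIC slot is skipped),
 * each named "<dev>-fp-<index>".  On failure, everything requested so far
 * is released again.
 */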
1712 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1713 {
1714         int i, rc, offset = 0;
1715
1716         /* no default status block for vf */
1717         if (IS_PF(bp)) {
1718                 rc = request_irq(bp->msix_table[offset++].vector,
1719                                  bnx2x_msix_sp_int, 0,
1720                                  bp->dev->name, bp->dev);
1721                 if (rc) {
1722                         BNX2X_ERR("request sp irq failed\n");
1723                         return -EBUSY;
1724                 }
1725         }
1726
1727         if (CNIC_SUPPORT(bp))
1728                 offset++;
1729
1730         for_each_eth_queue(bp, i) {
1731                 struct bnx2x_fastpath *fp = &bp->fp[i];
1732                 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1733                          bp->dev->name, i);
1734
1735                 rc = request_irq(bp->msix_table[offset].vector,
1736                                  bnx2x_msix_fp_int, 0, fp->name, fp);
1737                 if (rc) {
1738                         BNX2X_ERR("request fp #%d irq (%d) failed  rc %d\n", i,
1739                               bp->msix_table[offset].vector, rc);
1740                         bnx2x_free_msix_irqs(bp, offset);
1741                         return -EBUSY;
1742                 }
1743
1744                 offset++;
1745         }
1746
1747         i = BNX2X_NUM_ETH_QUEUES(bp);
1748         if (IS_PF(bp)) {
1749                 offset = 1 + CNIC_SUPPORT(bp);
1750                 netdev_info(bp->dev,
1751                             "using MSI-X  IRQs: sp %d  fp[%d] %d ... fp[%d] %d\n",
1752                             bp->msix_table[0].vector,
1753                             0, bp->msix_table[offset].vector,
1754                             i - 1, bp->msix_table[offset + i - 1].vector);
1755         } else {
1756                 offset = CNIC_SUPPORT(bp);
1757                 netdev_info(bp->dev,
1758                             "using MSI-X  IRQs: fp[%d] %d ... fp[%d] %d\n",
1759                             0, bp->msix_table[offset].vector,
1760                             i - 1, bp->msix_table[offset + i - 1].vector);
1761         }
1762         return 0;
1763 }
1764
1765 int bnx2x_enable_msi(struct bnx2x *bp)
1766 {
1767         int rc;
1768
1769         rc = pci_enable_msi(bp->pdev);
1770         if (rc) {
1771                 BNX2X_DEV_INFO("MSI is not attainable\n");
1772                 return -1;
1773         }
1774         bp->flags |= USING_MSI_FLAG;
1775
1776         return 0;
1777 }
1778
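/* Request the single interrupt used outside of multi-vector MSI-X mode:
 * the first MSI-X vector when single MSI-X is active, otherwise the PCI
 * device irq (MSI or INTx).  IRQF_SHARED is only needed for legacy INTx.
 */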
1779 static int bnx2x_req_irq(struct bnx2x *bp)
1780 {
1781         unsigned long flags;
1782         unsigned int irq;
1783
1784         if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1785                 flags = 0;
1786         else
1787                 flags = IRQF_SHARED;
1788
1789         if (bp->flags & USING_MSIX_FLAG)
1790                 irq = bp->msix_table[0].vector;
1791         else
1792                 irq = bp->pdev->irq;
1793
1794         return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1795 }
1796
1797 static int bnx2x_setup_irqs(struct bnx2x *bp)
1798 {
1799         int rc = 0;
1800         if (bp->flags & USING_MSIX_FLAG &&
1801             !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1802                 rc = bnx2x_req_msix_irqs(bp);
1803                 if (rc)
1804                         return rc;
1805         } else {
1806                 rc = bnx2x_req_irq(bp);
1807                 if (rc) {
1808                         BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
1809                         return rc;
1810                 }
1811                 if (bp->flags & USING_MSI_FLAG) {
1812                         bp->dev->irq = bp->pdev->irq;
1813                         netdev_info(bp->dev, "using MSI IRQ %d\n",
1814                                     bp->dev->irq);
1815                 }
1816                 if (bp->flags & USING_MSIX_FLAG) {
1817                         bp->dev->irq = bp->msix_table[0].vector;
1818                         netdev_info(bp->dev, "using MSIX IRQ %d\n",
1819                                     bp->dev->irq);
1820                 }
1821         }
1822
1823         return 0;
1824 }
1825
1826 static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1827 {
1828         int i;
1829
1830         for_each_rx_queue_cnic(bp, i) {
1831                 bnx2x_fp_init_lock(&bp->fp[i]);
1832                 napi_enable(&bnx2x_fp(bp, i, napi));
1833         }
1834 }
1835
1836 static void bnx2x_napi_enable(struct bnx2x *bp)
1837 {
1838         int i;
1839
1840         for_each_eth_queue(bp, i) {
1841                 bnx2x_fp_init_lock(&bp->fp[i]);
1842                 napi_enable(&bnx2x_fp(bp, i, napi));
1843         }
1844 }
1845
1846 static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1847 {
1848         int i;
1849
1850         for_each_rx_queue_cnic(bp, i) {
1851                 napi_disable(&bnx2x_fp(bp, i, napi));
1852                 while (!bnx2x_fp_ll_disable(&bp->fp[i]))
1853                         usleep_range(1000, 2000);
1854         }
1855 }
1856
1857 static void bnx2x_napi_disable(struct bnx2x *bp)
1858 {
1859         int i;
1860
1861         for_each_eth_queue(bp, i) {
1862                 napi_disable(&bnx2x_fp(bp, i, napi));
1863                 while (!bnx2x_fp_ll_disable(&bp->fp[i]))
1864                         usleep_range(1000, 2000);
1865         }
1866 }
1867
1868 void bnx2x_netif_start(struct bnx2x *bp)
1869 {
1870         if (netif_running(bp->dev)) {
1871                 bnx2x_napi_enable(bp);
1872                 if (CNIC_LOADED(bp))
1873                         bnx2x_napi_enable_cnic(bp);
1874                 bnx2x_int_enable(bp);
1875                 if (bp->state == BNX2X_STATE_OPEN)
1876                         netif_tx_wake_all_queues(bp->dev);
1877         }
1878 }
1879
1880 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1881 {
1882         bnx2x_int_disable_sync(bp, disable_hw);
1883         bnx2x_napi_disable(bp);
1884         if (CNIC_LOADED(bp))
1885                 bnx2x_napi_disable_cnic(bp);
1886 }
1887
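/* .ndo_select_queue: when CNIC is loaded and FCoE is enabled, steer FCoE
 * and FIP frames (looking past a possible VLAN tag) to the dedicated FCoE
 * Tx ring; everything else goes to a regular ETH queue chosen by the
 * kernel fallback hash.
 */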
1888 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
1889                        void *accel_priv, select_queue_fallback_t fallback)
1890 {
1891         struct bnx2x *bp = netdev_priv(dev);
1892
1893         if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
1894                 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1895                 u16 ether_type = ntohs(hdr->h_proto);
1896
1897                 /* Skip VLAN tag if present */
1898                 if (ether_type == ETH_P_8021Q) {
1899                         struct vlan_ethhdr *vhdr =
1900                                 (struct vlan_ethhdr *)skb->data;
1901
1902                         ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1903                 }
1904
1905                 /* If ethertype is FCoE or FIP - use FCoE ring */
1906                 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1907                         return bnx2x_fcoe_tx(bp, txq_index);
1908         }
1909
1910         /* select a non-FCoE queue */
1911         return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
1912 }
1913
1914 void bnx2x_set_num_queues(struct bnx2x *bp)
1915 {
1916         /* RSS queues */
1917         bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
1918
1919         /* override in STORAGE SD modes */
1920         if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
1921                 bp->num_ethernet_queues = 1;
1922
1923         /* Add special queues */
1924         bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1925         bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1926
1927         BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
1928 }
1929
1930 /**
1931  * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1932  *
1933  * @bp:         Driver handle
1934  *
1935  * We currently support at most 16 Tx queues for each CoS, thus we will
1936  * allocate a multiple of 16 for the ETH L2 rings, according to the value of
1937  * bp->max_cos.
1938  *
1939  * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1940  * index after all ETH L2 indices.
1941  *
1942  * If the actual number of Tx queues (for each CoS) is less than 16 then there
1943  * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1944  * 16..31, ...) with indices that are not coupled with any real Tx queue.
1945  *
1946  * The proper configuration of skb->queue_mapping is handled by
1947  * bnx2x_select_queue() and __skb_tx_hash().
1948  *
1949  * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1950  * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1951  */
1952 static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
1953 {
1954         int rc, tx, rx;
1955
1956         tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
1957         rx = BNX2X_NUM_ETH_QUEUES(bp);
1958
1959         /* account for the FCoE queue */
1960         if (include_cnic && !NO_FCOE(bp)) {
1961                 rx++;
1962                 tx++;
1963         }
1964
1965         rc = netif_set_real_num_tx_queues(bp->dev, tx);
1966         if (rc) {
1967                 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1968                 return rc;
1969         }
1970         rc = netif_set_real_num_rx_queues(bp->dev, rx);
1971         if (rc) {
1972                 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1973                 return rc;
1974         }
1975
1976         DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
1977                           tx, rx);
1978
1979         return rc;
1980 }
1981
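/* Compute the Rx buffer size of every queue: FW alignment at both ends
 * plus IP header padding plus Ethernet overhead plus the MTU (a fixed
 * mini-jumbo MTU for the FCoE ring).  If the result plus NET_SKB_PAD
 * still fits in a page, the page-frag allocator is used (rx_frag_size
 * is set), otherwise rx_frag_size is left at 0.
 */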
1982 static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1983 {
1984         int i;
1985
1986         for_each_queue(bp, i) {
1987                 struct bnx2x_fastpath *fp = &bp->fp[i];
1988                 u32 mtu;
1989
1990                 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1991                 if (IS_FCOE_IDX(i))
1992                         /*
1993                          * Although no IP frames are expected to arrive on
1994                          * this ring, we still want to add an
1995                          * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1996                          * overrun attack.
1997                          */
1998                         mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
1999                 else
2000                         mtu = bp->dev->mtu;
2001                 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
2002                                   IP_HEADER_ALIGNMENT_PADDING +
2003                                   ETH_OVREHEAD +
2004                                   mtu +
2005                                   BNX2X_FW_RX_ALIGN_END;
2006                 /* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
2007                 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
2008                         fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
2009                 else
2010                         fp->rx_frag_size = 0;
2011         }
2012 }
2013
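/* Seed the RSS indirection table with the default ethtool spread across
 * the ETH queues, offset by the leading queue's client id, and push the
 * resulting configuration to the device.
 */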
2014 static int bnx2x_init_rss(struct bnx2x *bp)
2015 {
2016         int i;
2017         u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
2018
2019         /* Prepare the initial contents for the indirection table if RSS is
2020          * enabled
2021          */
2022         for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
2023                 bp->rss_conf_obj.ind_table[i] =
2024                         bp->fp->cl_id +
2025                         ethtool_rxfh_indir_default(i, num_eth_queues);
2026
2027         /*
2028          * For 57710 and 57711 SEARCHER configuration (rss_keys) is
2029          * per-port, so if explicit configuration is needed, do it only
2030          * for a PMF.
2031          *
2032          * For 57712 and newer on the other hand it's a per-function
2033          * configuration.
2034          */
2035         return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
2036 }
2037
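/* Build the RSS ramrod parameters: regular RSS mode with TCP/IPv4,
 * TCP/IPv6 and plain IPv4/IPv6 hashing when enabling (UDP hashing only if
 * requested on the object), copy the indirection table, optionally
 * generate a fresh random hash key, and send the configuration through
 * the PF path or the VF-PF channel.
 */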
2038 int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
2039               bool config_hash, bool enable)
2040 {
2041         struct bnx2x_config_rss_params params = {NULL};
2042
2043         /* Although RSS is meaningless when there is a single HW queue, we
2044          * still need it enabled in order to have the HW Rx hash generated.
2045          *
2046          * if (!is_eth_multi(bp))
2047          *      bp->multi_mode = ETH_RSS_MODE_DISABLED;
2048          */
2049
2050         params.rss_obj = rss_obj;
2051
2052         __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
2053
2054         if (enable) {
2055                 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
2056
2057                 /* RSS configuration */
2058                 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
2059                 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
2060                 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
2061                 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
2062                 if (rss_obj->udp_rss_v4)
2063                         __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
2064                 if (rss_obj->udp_rss_v6)
2065                         __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
2066         } else {
2067                 __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
2068         }
2069
2070         /* Hash bits */
2071         params.rss_result_mask = MULTI_MASK;
2072
2073         memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
2074
2075         if (config_hash) {
2076                 /* RSS keys */
2077                 prandom_bytes(params.rss_key, T_ETH_RSS_KEY * 4);
2078                 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
2079         }
2080
2081         if (IS_PF(bp))
2082                 return bnx2x_config_rss(bp, &params);
2083         else
2084                 return bnx2x_vfpf_config_rss(bp, &params);
2085 }
2086
2087 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
2088 {
2089         struct bnx2x_func_state_params func_params = {NULL};
2090
2091         /* Prepare parameters for function state transitions */
2092         __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2093
2094         func_params.f_obj = &bp->func_obj;
2095         func_params.cmd = BNX2X_F_CMD_HW_INIT;
2096
2097         func_params.params.hw_init.load_phase = load_code;
2098
2099         return bnx2x_func_state_change(bp, &func_params);
2100 }
2101
2102 /*
2103  * Cleans objects that have internal lists, without sending
2104  * ramrods. Should be run when interrupts are disabled.
2105  */
2106 void bnx2x_squeeze_objects(struct bnx2x *bp)
2107 {
2108         int rc;
2109         unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
2110         struct bnx2x_mcast_ramrod_params rparam = {NULL};
2111         struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
2112
2113         /***************** Cleanup MACs' object first *************************/
2114
2115         /* Wait for completion of requested */
2116         __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2117         /* Perform a dry cleanup */
2118         __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2119
2120         /* Clean ETH primary MAC */
2121         __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
2122         rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
2123                                  &ramrod_flags);
2124         if (rc != 0)
2125                 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2126
2127         /* Cleanup UC list */
2128         vlan_mac_flags = 0;
2129         __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2130         rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2131                                  &ramrod_flags);
2132         if (rc != 0)
2133                 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2134
2135         /***************** Now clean mcast object *****************************/
2136         rparam.mcast_obj = &bp->mcast_obj;
2137         __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2138
2139         /* Add a DEL command... - Since we're doing a driver cleanup only,
2140          * we take a lock surrounding both the initial send and the CONTs,
2141          * as we don't want a true completion to disrupt us in the middle.
2142          */
2143         netif_addr_lock_bh(bp->dev);
2144         rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2145         if (rc < 0)
2146                 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2147                           rc);
2148
2149         /* ...and wait until all pending commands are cleared */
2150         rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2151         while (rc != 0) {
2152                 if (rc < 0) {
2153                         BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2154                                   rc);
2155                         netif_addr_unlock_bh(bp->dev);
2156                         return;
2157                 }
2158
2159                 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2160         }
2161         netif_addr_unlock_bh(bp->dev);
2162 }
2163
2164 #ifndef BNX2X_STOP_ON_ERROR
2165 #define LOAD_ERROR_EXIT(bp, label) \
2166         do { \
2167                 (bp)->state = BNX2X_STATE_ERROR; \
2168                 goto label; \
2169         } while (0)
2170
2171 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2172         do { \
2173                 bp->cnic_loaded = false; \
2174                 goto label; \
2175         } while (0)
2176 #else /*BNX2X_STOP_ON_ERROR*/
2177 #define LOAD_ERROR_EXIT(bp, label) \
2178         do { \
2179                 (bp)->state = BNX2X_STATE_ERROR; \
2180                 (bp)->panic = 1; \
2181                 return -EBUSY; \
2182         } while (0)
2183 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2184         do { \
2185                 bp->cnic_loaded = false; \
2186                 (bp)->panic = 1; \
2187                 return -EBUSY; \
2188         } while (0)
2189 #endif /*BNX2X_STOP_ON_ERROR*/
2190
2191 static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2192 {
2193         BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2194                        bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2195         return;
2196 }
2197
2198 static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
2199 {
2200         int num_groups, vf_headroom = 0;
2201         int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
2202
2203         /* number of queues for statistics is number of eth queues + FCoE */
2204         u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
2205
2206         /* Total number of FW statistics requests =
2207          * 1 for port stats + 1 for PF stats + potentially 2 for FCoE (the FCoE
2208          * proper stats and the FCoE L2 queue) + the number of queues (which
2209          * already includes 1 for the FCoE L2 queue if applicable)
2210          */
2211         bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
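        /* For example, with 8 ETH queues and FCoE supported this gives
         * 2 + 1 + 9 = 12 requests; assuming STATS_QUERY_CMD_COUNT is 16 and
         * there is no VF headroom, they fit into a single command group below.
         */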
2212
2213         /* vf stats appear in the request list, but their data is allocated by
2214          * the VFs themselves. We don't include them in the bp->fw_stats_num as
2215          * it is used to determine where to place the vf stats queries in the
2216          * request struct
2217          */
2218         if (IS_SRIOV(bp))
2219                 vf_headroom = bnx2x_vf_headroom(bp);
2220
2221         /* Request is built from stats_query_header and an array of
2222          * stats_query_cmd_group each of which contains
2223          * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2224          * configured in the stats_query_header.
2225          */
2226         num_groups =
2227                 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2228                  (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
2229                  1 : 0));
2230
2231         DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2232            bp->fw_stats_num, vf_headroom, num_groups);
2233         bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2234                 num_groups * sizeof(struct stats_query_cmd_group);
2235
2236         /* Data for statistics requests + stats_counter
2237          * stats_counter holds per-STORM counters that are incremented
2238          * when STORM has finished with the current request.
2239          * Memory for FCoE offloaded statistics is counted anyway,
2240          * even if it will not be sent.
2241          * VF stats are not accounted for here as the data of VF stats is stored
2242          * in memory allocated by the VF, not here.
2243          */
2244         bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2245                 sizeof(struct per_pf_stats) +
2246                 sizeof(struct fcoe_statistics_params) +
2247                 sizeof(struct per_queue_stats) * num_queue_stats +
2248                 sizeof(struct stats_counter);
2249
2250         bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping,
2251                                        bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2252         if (!bp->fw_stats)
2253                 goto alloc_mem_err;
2254
2255         /* Set shortcuts */
2256         bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2257         bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2258         bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2259                 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2260         bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2261                 bp->fw_stats_req_sz;
2262
2263         DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
2264            U64_HI(bp->fw_stats_req_mapping),
2265            U64_LO(bp->fw_stats_req_mapping));
2266         DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
2267            U64_HI(bp->fw_stats_data_mapping),
2268            U64_LO(bp->fw_stats_data_mapping));
2269         return 0;
2270
2271 alloc_mem_err:
2272         bnx2x_free_fw_stats_mem(bp);
2273         BNX2X_ERR("Can't allocate FW stats memory\n");
2274         return -ENOMEM;
2275 }
2276
2277 /* send load request to mcp and analyze response */
2278 static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2279 {
2280         u32 param;
2281
2282         /* init fw_seq */
2283         bp->fw_seq =
2284                 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2285                  DRV_MSG_SEQ_NUMBER_MASK);
2286         BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2287
2288         /* Get current FW pulse sequence */
2289         bp->fw_drv_pulse_wr_seq =
2290                 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2291                  DRV_PULSE_SEQ_MASK);
2292         BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2293
2294         param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2295
2296         if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2297                 param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2298
2299         /* load request */
2300         (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
2301
2302         /* if mcp fails to respond we must abort */
2303         if (!(*load_code)) {
2304                 BNX2X_ERR("MCP response failure, aborting\n");
2305                 return -EBUSY;
2306         }
2307
2308         /* If mcp refused (e.g. other port is in diagnostic mode) we
2309          * must abort
2310          */
2311         if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2312                 BNX2X_ERR("MCP refused load request, aborting\n");
2313                 return -EBUSY;
2314         }
2315         return 0;
2316 }
2317
2318 /* check whether another PF has already loaded FW to the chip. In
2319  * virtualized environments a PF from another VM may have already
2320  * initialized the device, including loading the FW
2321  */
2322 int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
2323 {
2324         /* is another pf loaded on this engine? */
2325         if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2326             load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2327                 /* build my FW version dword */
2328                 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2329                         (BCM_5710_FW_MINOR_VERSION << 8) +
2330                         (BCM_5710_FW_REVISION_VERSION << 16) +
2331                         (BCM_5710_FW_ENGINEERING_VERSION << 24);
2332
2333                 /* read loaded FW from chip */
2334                 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2335
2336                 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2337                    loaded_fw, my_fw);
2338
2339                 /* abort nic load if version mismatch */
2340                 if (my_fw != loaded_fw) {
2341                         if (print_err)
2342                                 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
2343                                           loaded_fw, my_fw);
2344                         else
2345                                 BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
2346                                                loaded_fw, my_fw);
2347                         return -EBUSY;
2348                 }
2349         }
2350         return 0;
2351 }
2352
2353 /* returns the "mcp load_code" according to global load_count array */
2354 static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2355 {
2356         int path = BP_PATH(bp);
2357
2358         DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d]      %d, %d, %d\n",
2359            path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2360            bnx2x_load_count[path][2]);
2361         bnx2x_load_count[path][0]++;
2362         bnx2x_load_count[path][1 + port]++;
2363         DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d]  %d, %d, %d\n",
2364            path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2365            bnx2x_load_count[path][2]);
2366         if (bnx2x_load_count[path][0] == 1)
2367                 return FW_MSG_CODE_DRV_LOAD_COMMON;
2368         else if (bnx2x_load_count[path][1 + port] == 1)
2369                 return FW_MSG_CODE_DRV_LOAD_PORT;
2370         else
2371                 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2372 }
2373
2374 /* mark PMF if applicable */
2375 static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2376 {
2377         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2378             (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2379             (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2380                 bp->port.pmf = 1;
2381                 /* We need the barrier to ensure the ordering between the
2382                  * writing to bp->port.pmf here and reading it from the
2383                  * bnx2x_periodic_task().
2384                  */
2385                 smp_mb();
2386         } else {
2387                 bp->port.pmf = 0;
2388         }
2389
2390         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2391 }
2392
2393 static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2394 {
2395         if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2396              (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2397             (bp->common.shmem2_base)) {
2398                 if (SHMEM2_HAS(bp, dcc_support))
2399                         SHMEM2_WR(bp, dcc_support,
2400                                   (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2401                                    SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2402                 if (SHMEM2_HAS(bp, afex_driver_support))
2403                         SHMEM2_WR(bp, afex_driver_support,
2404                                   SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2405         }
2406
2407         /* Set AFEX default VLAN tag to an invalid value */
2408         bp->afex_def_vlan_tag = -1;
2409 }
2410
2411 /**
2412  * bnx2x_bz_fp - zero content of the fastpath structure.
2413  *
2414  * @bp:         driver handle
2415  * @index:      fastpath index to be zeroed
2416  *
2417  * Makes sure the contents of the bp->fp[index].napi is kept
2418  * intact.
2419  */
2420 static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2421 {
2422         struct bnx2x_fastpath *fp = &bp->fp[index];
2423         int cos;
2424         struct napi_struct orig_napi = fp->napi;
2425         struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
2426
2427         /* bzero bnx2x_fastpath contents */
2428         if (fp->tpa_info)
2429                 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2430                        sizeof(struct bnx2x_agg_info));
2431         memset(fp, 0, sizeof(*fp));
2432
2433         /* Restore the NAPI object as it has been already initialized */
2434         fp->napi = orig_napi;
2435         fp->tpa_info = orig_tpa_info;
2436         fp->bp = bp;
2437         fp->index = index;
2438         if (IS_ETH_FP(fp))
2439                 fp->max_cos = bp->max_cos;
2440         else
2441                 /* Special queues support only one CoS */
2442                 fp->max_cos = 1;
2443
2444         /* Init txdata pointers */
2445         if (IS_FCOE_FP(fp))
2446                 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
2447         if (IS_ETH_FP(fp))
2448                 for_each_cos_in_tx_queue(fp, cos)
2449                         fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2450                                 BNX2X_NUM_ETH_QUEUES(bp) + index];
2451
2452         /* set the tpa flag for each queue. The tpa flag determines the queue
2453          * minimal size so it must be set prior to queue memory allocation
2454          */
2455         fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
2456                                   (bp->flags & GRO_ENABLE_FLAG &&
2457                                    bnx2x_mtu_allows_gro(bp->dev->mtu)));
2458         if (bp->flags & TPA_ENABLE_FLAG)
2459                 fp->mode = TPA_MODE_LRO;
2460         else if (bp->flags & GRO_ENABLE_FLAG)
2461                 fp->mode = TPA_MODE_GRO;
2462
2463         /* We don't want TPA on an FCoE L2 ring */
2464         if (IS_FCOE_FP(fp))
2465                 fp->disable_tpa = 1;
2466 }
2467
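/* Bring up the CNIC (iSCSI/FCoE offload) side of an already loaded NIC:
 * allocate its memory and fastpath resources, add and enable its NAPI
 * contexts, initialize the CNIC part of the HW, enable the timer scan and
 * set up the CNIC queues on a PF, then publish cnic_loaded and notify the
 * CNIC driver when the device is open.
 */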
2468 int bnx2x_load_cnic(struct bnx2x *bp)
2469 {
2470         int i, rc, port = BP_PORT(bp);
2471
2472         DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2473
2474         mutex_init(&bp->cnic_mutex);
2475
2476         if (IS_PF(bp)) {
2477                 rc = bnx2x_alloc_mem_cnic(bp);
2478                 if (rc) {
2479                         BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2480                         LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2481                 }
2482         }
2483
2484         rc = bnx2x_alloc_fp_mem_cnic(bp);
2485         if (rc) {
2486                 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2487                 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2488         }
2489
2490         /* Update the number of queues with the cnic queues */
2491         rc = bnx2x_set_real_num_queues(bp, 1);
2492         if (rc) {
2493                 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2494                 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2495         }
2496
2497         /* Add all CNIC NAPI objects */
2498         bnx2x_add_all_napi_cnic(bp);
2499         DP(NETIF_MSG_IFUP, "cnic napi added\n");
2500         bnx2x_napi_enable_cnic(bp);
2501
2502         rc = bnx2x_init_hw_func_cnic(bp);
2503         if (rc)
2504                 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2505
2506         bnx2x_nic_init_cnic(bp);
2507
2508         if (IS_PF(bp)) {
2509                 /* Enable Timer scan */
2510                 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2511
2512                 /* setup cnic queues */
2513                 for_each_cnic_queue(bp, i) {
2514                         rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2515                         if (rc) {
2516                                 BNX2X_ERR("Queue setup failed\n");
2517                                 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2518                         }
2519                 }
2520         }
2521
2522         /* Initialize Rx filter. */
2523         bnx2x_set_rx_mode_inner(bp);
2524
2525         /* re-read iscsi info */
2526         bnx2x_get_iscsi_info(bp);
2527         bnx2x_setup_cnic_irq_info(bp);
2528         bnx2x_setup_cnic_info(bp);
2529         bp->cnic_loaded = true;
2530         if (bp->state == BNX2X_STATE_OPEN)
2531                 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2532
2533         DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2534
2535         return 0;
2536
2537 #ifndef BNX2X_STOP_ON_ERROR
2538 load_error_cnic2:
2539         /* Disable Timer scan */
2540         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2541
2542 load_error_cnic1:
2543         bnx2x_napi_disable_cnic(bp);
2544         /* Update the number of queues without the cnic queues */
2545         if (bnx2x_set_real_num_queues(bp, 0))
2546                 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2547 load_error_cnic0:
2548         BNX2X_ERR("CNIC-related load failed\n");
2549         bnx2x_free_fp_mem_cnic(bp);
2550         bnx2x_free_mem_cnic(bp);
2551         return rc;
2552 #endif /* ! BNX2X_STOP_ON_ERROR */
2553 }
2554
2555 /* must be called with rtnl_lock */
2556 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2557 {
2558         int port = BP_PORT(bp);
2559         int i, rc = 0, load_code = 0;
2560
2561         DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2562         DP(NETIF_MSG_IFUP,
2563            "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2564
2565 #ifdef BNX2X_STOP_ON_ERROR
2566         if (unlikely(bp->panic)) {
2567                 BNX2X_ERR("Can't load NIC when there is panic\n");
2568                 return -EPERM;
2569         }
2570 #endif
2571
2572         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2573
2574         /* zero the structure w/o any lock, before SP handler is initialized */
2575         memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2576         __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2577                 &bp->last_reported_link.link_report_flags);
2578
2579         if (IS_PF(bp))
2580                 /* must be called before memory allocation and HW init */
2581                 bnx2x_ilt_set_info(bp);
2582
2583         /*
2584          * Zero fastpath structures preserving invariants like napi, which are
2585          * allocated only once, fp index, max_cos, bp pointer.
2586          * Also set fp->disable_tpa and txdata_ptr.
2587          */
2588         DP(NETIF_MSG_IFUP, "num queues: %d\n", bp->num_queues);
2589         for_each_queue(bp, i)
2590                 bnx2x_bz_fp(bp, i);
2591         memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2592                                   bp->num_cnic_queues) *
2593                                   sizeof(struct bnx2x_fp_txdata));
2594
2595         bp->fcoe_init = false;
2596
2597         /* Set the receive queues buffer size */
2598         bnx2x_set_rx_buf_size(bp);
2599
2600         if (IS_PF(bp)) {
2601                 rc = bnx2x_alloc_mem(bp);
2602                 if (rc) {
2603                         BNX2X_ERR("Unable to allocate bp memory\n");
2604                         return rc;
2605                 }
2606         }
2607
2608         /* Needs to be done after alloc mem, since it's self-adjusting to the
2609          * amount of memory available for RSS queues
2610          */
2611         rc = bnx2x_alloc_fp_mem(bp);
2612         if (rc) {
2613                 BNX2X_ERR("Unable to allocate memory for fps\n");
2614                 LOAD_ERROR_EXIT(bp, load_error0);
2615         }
2616
2617         /* Allocate memory for FW statistics */
2618         if (bnx2x_alloc_fw_stats_mem(bp))
2619                 LOAD_ERROR_EXIT(bp, load_error0);
2620
2621         /* request pf to initialize status blocks */
2622         if (IS_VF(bp)) {
2623                 rc = bnx2x_vfpf_init(bp);
2624                 if (rc)
2625                         LOAD_ERROR_EXIT(bp, load_error0);
2626         }
2627
2628         /* Since bnx2x_alloc_mem() may update bp->num_queues,
2629          * bnx2x_set_real_num_queues() should always
2630          * come after it. At this stage cnic queues are not counted.
2631          */
2632         rc = bnx2x_set_real_num_queues(bp, 0);
2633         if (rc) {
2634                 BNX2X_ERR("Unable to set real_num_queues\n");
2635                 LOAD_ERROR_EXIT(bp, load_error0);
2636         }
2637
2638         /* configure multi cos mappings in kernel.
2639          * this configuration may be overridden by a multi class queue
2640          * discipline or by a dcbx negotiation result.
2641          */
2642         bnx2x_setup_tc(bp->dev, bp->max_cos);
2643
2644         /* Add all NAPI objects */
2645         bnx2x_add_all_napi(bp);
2646         DP(NETIF_MSG_IFUP, "napi added\n");
2647         bnx2x_napi_enable(bp);
2648
2649         if (IS_PF(bp)) {
2650                 /* set pf load just before approaching the MCP */
2651                 bnx2x_set_pf_load(bp);
2652
2653                 /* if mcp exists send load request and analyze response */
2654                 if (!BP_NOMCP(bp)) {
2655                         /* attempt to load pf */
2656                         rc = bnx2x_nic_load_request(bp, &load_code);
2657                         if (rc)
2658                                 LOAD_ERROR_EXIT(bp, load_error1);
2659
2660                         /* what did mcp say? */
2661                         rc = bnx2x_compare_fw_ver(bp, load_code, true);
2662                         if (rc) {
2663                                 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2664                                 LOAD_ERROR_EXIT(bp, load_error2);
2665                         }
2666                 } else {
2667                         load_code = bnx2x_nic_load_no_mcp(bp, port);
2668                 }
2669
2670                 /* mark pmf if applicable */
2671                 bnx2x_nic_load_pmf(bp, load_code);
2672
2673                 /* Init Function state controlling object */
2674                 bnx2x__init_func_obj(bp);
2675
2676                 /* Initialize HW */
2677                 rc = bnx2x_init_hw(bp, load_code);
2678                 if (rc) {
2679                         BNX2X_ERR("HW init failed, aborting\n");
2680                         bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2681                         LOAD_ERROR_EXIT(bp, load_error2);
2682                 }
2683         }
2684
2685         bnx2x_pre_irq_nic_init(bp);
2686
2687         /* Connect to IRQs */
2688         rc = bnx2x_setup_irqs(bp);
2689         if (rc) {
2690                 BNX2X_ERR("setup irqs failed\n");
2691                 if (IS_PF(bp))
2692                         bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2693                 LOAD_ERROR_EXIT(bp, load_error2);
2694         }
2695
2696         /* Init per-function objects */
2697         if (IS_PF(bp)) {
2698                 /* Setup NIC internals and enable interrupts */
2699                 bnx2x_post_irq_nic_init(bp, load_code);
2700
2701                 bnx2x_init_bp_objs(bp);
2702                 bnx2x_iov_nic_init(bp);
2703
2704                 /* Set AFEX default VLAN tag to an invalid value */
2705                 bp->afex_def_vlan_tag = -1;
2706                 bnx2x_nic_load_afex_dcc(bp, load_code);
2707                 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2708                 rc = bnx2x_func_start(bp);
2709                 if (rc) {
2710                         BNX2X_ERR("Function start failed!\n");
2711                         bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2712
2713                         LOAD_ERROR_EXIT(bp, load_error3);
2714                 }
2715
2716                 /* Send LOAD_DONE command to MCP */
2717                 if (!BP_NOMCP(bp)) {
2718                         load_code = bnx2x_fw_command(bp,
2719                                                      DRV_MSG_CODE_LOAD_DONE, 0);
2720                         if (!load_code) {
2721                                 BNX2X_ERR("MCP response failure, aborting\n");
2722                                 rc = -EBUSY;
2723                                 LOAD_ERROR_EXIT(bp, load_error3);
2724                         }
2725                 }
2726
2727                 /* initialize FW coalescing state machines in RAM */
2728                 bnx2x_update_coalesce(bp);
2729         }
2730
2731         /* setup the leading queue */
2732         rc = bnx2x_setup_leading(bp);
2733         if (rc) {
2734                 BNX2X_ERR("Setup leading failed!\n");
2735                 LOAD_ERROR_EXIT(bp, load_error3);
2736         }
2737
2738         /* set up the rest of the queues */
2739         for_each_nondefault_eth_queue(bp, i) {
2740                 if (IS_PF(bp))
2741                         rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
2742                 else /* VF */
2743                         rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
2744                 if (rc) {
2745                         BNX2X_ERR("Queue %d setup failed\n", i);
2746                         LOAD_ERROR_EXIT(bp, load_error3);
2747                 }
2748         }
2749
2750         /* setup rss */
2751         rc = bnx2x_init_rss(bp);
2752         if (rc) {
2753                 BNX2X_ERR("PF RSS init failed\n");
2754                 LOAD_ERROR_EXIT(bp, load_error3);
2755         }
2756
2757         /* Now that the clients are configured we are ready to work */
2758         bp->state = BNX2X_STATE_OPEN;
2759
2760         /* Configure a ucast MAC */
2761         if (IS_PF(bp))
2762                 rc = bnx2x_set_eth_mac(bp, true);
2763         else /* vf */
2764                 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2765                                            true);
2766         if (rc) {
2767                 BNX2X_ERR("Setting Ethernet MAC failed\n");
2768                 LOAD_ERROR_EXIT(bp, load_error3);
2769         }
2770
2771         if (IS_PF(bp) && bp->pending_max) {
2772                 bnx2x_update_max_mf_config(bp, bp->pending_max);
2773                 bp->pending_max = 0;
2774         }
2775
2776         if (bp->port.pmf) {
2777                 rc = bnx2x_initial_phy_init(bp, load_mode);
2778                 if (rc)
2779                         LOAD_ERROR_EXIT(bp, load_error3);
2780         }
2781         bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
2782
2783         /* Start fast path */
2784
2785         /* Initialize Rx filter. */
2786         bnx2x_set_rx_mode_inner(bp);
2787
2788         /* Start the Tx */
2789         switch (load_mode) {
2790         case LOAD_NORMAL:
2791                 /* Tx queue should be only re-enabled */
2792                 netif_tx_wake_all_queues(bp->dev);
2793                 break;
2794
2795         case LOAD_OPEN:
2796                 netif_tx_start_all_queues(bp->dev);
2797                 smp_mb__after_atomic();
2798                 break;
2799
2800         case LOAD_DIAG:
2801         case LOAD_LOOPBACK_EXT:
2802                 bp->state = BNX2X_STATE_DIAG;
2803                 break;
2804
2805         default:
2806                 break;
2807         }
2808
2809         if (bp->port.pmf)
2810                 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
2811         else
2812                 bnx2x__link_status_update(bp);
2813
2814         /* start the timer */
2815         mod_timer(&bp->timer, jiffies + bp->current_interval);
2816
2817         if (CNIC_ENABLED(bp))
2818                 bnx2x_load_cnic(bp);
2819
2820         if (IS_PF(bp))
2821                 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
2822
2823         if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2824                 /* mark driver as loaded in shmem2 */
2825                 u32 val;
2826                 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2827                 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2828                           val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2829                           DRV_FLAGS_CAPABILITIES_LOADED_L2);
2830         }
2831
2832         /* Wait for all pending SP commands to complete */
2833         if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2834                 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2835                 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
2836                 return -EBUSY;
2837         }
2838
2839         /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2840         if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2841                 bnx2x_dcbx_init(bp, false);
2842
2843         DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2844
2845         return 0;
2846
2847 #ifndef BNX2X_STOP_ON_ERROR
2848 load_error3:
2849         if (IS_PF(bp)) {
2850                 bnx2x_int_disable_sync(bp, 1);
2851
2852                 /* Clean queueable objects */
2853                 bnx2x_squeeze_objects(bp);
2854         }
2855
2856         /* Free SKBs, SGEs, TPA pool and driver internals */
2857         bnx2x_free_skbs(bp);
2858         for_each_rx_queue(bp, i)
2859                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2860
2861         /* Release IRQs */
2862         bnx2x_free_irq(bp);
2863 load_error2:
2864         if (IS_PF(bp) && !BP_NOMCP(bp)) {
2865                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2866                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2867         }
2868
2869         bp->port.pmf = 0;
2870 load_error1:
2871         bnx2x_napi_disable(bp);
2872         bnx2x_del_all_napi(bp);
2873
2874         /* clear pf_load status, as it was already set */
2875         if (IS_PF(bp))
2876                 bnx2x_clear_pf_load(bp);
2877 load_error0:
2878         bnx2x_free_fw_stats_mem(bp);
2879         bnx2x_free_fp_mem(bp);
2880         bnx2x_free_mem(bp);
2881
2882         return rc;
2883 #endif /* ! BNX2X_STOP_ON_ERROR */
2884 }
2885
2886 int bnx2x_drain_tx_queues(struct bnx2x *bp)
2887 {
2888         u8 rc = 0, cos, i;
2889
2890         /* Wait until tx fastpath tasks complete */
2891         for_each_tx_queue(bp, i) {
2892                 struct bnx2x_fastpath *fp = &bp->fp[i];
2893
2894                 for_each_cos_in_tx_queue(fp, cos)
2895                         rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2896                 if (rc)
2897                         return rc;
2898         }
2899         return 0;
2900 }
2901
2902 /* must be called with rtnl_lock */
2903 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2904 {
2905         int i;
2906         bool global = false;
2907
2908         DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2909
2910         /* mark driver as unloaded in shmem2 */
2911         if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2912                 u32 val;
2913                 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2914                 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2915                           val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2916         }
2917
2918         if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
2919             (bp->state == BNX2X_STATE_CLOSED ||
2920              bp->state == BNX2X_STATE_ERROR)) {
2921                 /* We can get here if the driver has been unloaded
2922                  * during parity error recovery and is either waiting for a
2923                  * leader to complete or for other functions to unload and
2924                  * then ifdown has been issued. In this case we want to
2925                  * unload and let other functions complete a recovery
2926                  * process.
2927                  */
2928                 bp->recovery_state = BNX2X_RECOVERY_DONE;
2929                 bp->is_leader = 0;
2930                 bnx2x_release_leader_lock(bp);
2931                 smp_mb();
2932
2933                 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2934                 BNX2X_ERR("Can't unload in closed or error state\n");
2935                 return -EINVAL;
2936         }
2937
2938         /* Nothing to do during unload if the previous bnx2x_nic_load()
2939          * has not completed successfully - all resources are released.
2940          *
2941          * We can get here only after an unsuccessful ndo_* callback, during which
2942          * dev->IFF_UP flag is still on.
2943          */
2944         if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
2945                 return 0;
2946
2947         /* It's important to set bp->state to a value different from
2948          * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2949          * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2950          */
2951         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2952         smp_mb();
2953
2954         /* indicate to VFs that the PF is going down */
2955         bnx2x_iov_channel_down(bp);
2956
2957         if (CNIC_LOADED(bp))
2958                 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2959
2960         /* Stop Tx */
2961         bnx2x_tx_disable(bp);
2962         netdev_reset_tc(bp->dev);
2963
2964         bp->rx_mode = BNX2X_RX_MODE_NONE;
2965
2966         del_timer_sync(&bp->timer);
2967
2968         if (IS_PF(bp)) {
2969                 /* Set ALWAYS_ALIVE bit in shmem */
2970                 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2971                 bnx2x_drv_pulse(bp);
2972                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2973                 bnx2x_save_statistics(bp);
2974         }
2975
2976         /* wait till consumers catch up with producers in all queues */
2977         bnx2x_drain_tx_queues(bp);
2978
2979         /* If this is a VF, indicate to the PF that this function is going down
2980          * (the PF will delete the SP elements and clear the initializations).
2981          */
2982         if (IS_VF(bp))
2983                 bnx2x_vfpf_close_vf(bp);
2984         else if (unload_mode != UNLOAD_RECOVERY)
2985                 /* if this is a normal/close unload we need to clean up the chip */
2986                 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
2987         else {
2988                 /* Send the UNLOAD_REQUEST to the MCP */
2989                 bnx2x_send_unload_req(bp, unload_mode);
2990
2991                 /* Prevent transactions to the host from the functions on the
2992                  * engine that doesn't reset global blocks in case of a global
2993                  * attention, once the global blocks are reset and the gates are
2994                  * opened (the engine whose leader will perform the recovery
2995                  * last).
2996                  */
2997                 if (!CHIP_IS_E1x(bp))
2998                         bnx2x_pf_disable(bp);
2999
3000                 /* Disable HW interrupts, NAPI */
3001                 bnx2x_netif_stop(bp, 1);
3002                 /* Delete all NAPI objects */
3003                 bnx2x_del_all_napi(bp);
3004                 if (CNIC_LOADED(bp))
3005                         bnx2x_del_all_napi_cnic(bp);
3006                 /* Release IRQs */
3007                 bnx2x_free_irq(bp);
3008
3009                 /* Report UNLOAD_DONE to MCP */
3010                 bnx2x_send_unload_done(bp, false);
3011         }
3012
3013         /*
3014          * At this stage no more interrupts will arrive so we may safely clean
3015          * the queueable objects here in case they failed to get cleaned so far.
3016          */
3017         if (IS_PF(bp))
3018                 bnx2x_squeeze_objects(bp);
3019
3020         /* There should be no more pending SP commands at this stage */
3021         bp->sp_state = 0;
3022
3023         bp->port.pmf = 0;
3024
3025         /* clear pending work in rtnl task */
3026         bp->sp_rtnl_state = 0;
3027         smp_mb();
3028
3029         /* Free SKBs, SGEs, TPA pool and driver internals */
3030         bnx2x_free_skbs(bp);
3031         if (CNIC_LOADED(bp))
3032                 bnx2x_free_skbs_cnic(bp);
3033         for_each_rx_queue(bp, i)
3034                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
3035
3036         bnx2x_free_fp_mem(bp);
3037         if (CNIC_LOADED(bp))
3038                 bnx2x_free_fp_mem_cnic(bp);
3039
3040         if (IS_PF(bp)) {
3041                 if (CNIC_LOADED(bp))
3042                         bnx2x_free_mem_cnic(bp);
3043         }
3044         bnx2x_free_mem(bp);
3045
3046         bp->state = BNX2X_STATE_CLOSED;
3047         bp->cnic_loaded = false;
3048
3049         /* Clear driver version indication in shmem */
3050         if (IS_PF(bp))
3051                 bnx2x_update_mng_version(bp);
3052
3053         /* Check if there are pending parity attentions. If there are - set
3054          * RECOVERY_IN_PROGRESS.
3055          */
3056         if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
3057                 bnx2x_set_reset_in_progress(bp);
3058
3059                 /* Set RESET_IS_GLOBAL if needed */
3060                 if (global)
3061                         bnx2x_set_reset_global(bp);
3062         }
3063
3064         /* The last driver must disable the "close the gate" functionality if
3065          * there is no parity attention or "process kill" pending.
3066          */
3067         if (IS_PF(bp) &&
3068             !bnx2x_clear_pf_load(bp) &&
3069             bnx2x_reset_is_done(bp, BP_PATH(bp)))
3070                 bnx2x_disable_close_the_gate(bp);
3071
3072         DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
3073
3074         return 0;
3075 }
3076
3077 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
3078 {
3079         u16 pmcsr;
3080
3081         /* If there is no power capability, silently succeed */
3082         if (!bp->pdev->pm_cap) {
3083                 BNX2X_DEV_INFO("No power capability. Breaking.\n");
3084                 return 0;
3085         }
3086
3087         pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
3088
3089         switch (state) {
3090         case PCI_D0:
3091                 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3092                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3093                                        PCI_PM_CTRL_PME_STATUS));
3094
3095                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3096                         /* delay required during transition out of D3hot */
3097                         msleep(20);
3098                 break;
3099
3100         case PCI_D3hot:
3101                 /* If there are other clients above, don't
3102                  * shut down the power */
3103                 if (atomic_read(&bp->pdev->enable_cnt) != 1)
3104                         return 0;
3105                 /* Don't shut down the power for emulation and FPGA */
3106                 if (CHIP_REV_IS_SLOW(bp))
3107                         return 0;
3108
3109                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3110                 pmcsr |= 3;     /* PowerState field = 3: D3hot */
3111
3112                 if (bp->wol)
3113                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3114
3115                 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3116                                       pmcsr);
3117
3118                 /* No more memory access after this point until
3119                  * device is brought back to D0.
3120                  */
3121                 break;
3122
3123         default:
3124                 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
3125                 return -EINVAL;
3126         }
3127         return 0;
3128 }
3129
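/* Illustrative sketch of the register math above (hypothetical WoL-enabled
 * case, not a path taken verbatim by the code): entering D3hot ends up
 * writing
 *
 *	pmcsr = (pmcsr & ~PCI_PM_CTRL_STATE_MASK) | 3 | PCI_PM_CTRL_PME_ENABLE;
 *
 * to the PM control/status register, while the D0 path clears the state
 * field and sets PCI_PM_CTRL_PME_STATUS (write-one-to-clear) to discard a
 * pending PME, then sleeps ~20 ms as required when leaving D3hot.
 */
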
3130 /*
3131  * net_device service functions
3132  */
3133 static int bnx2x_poll(struct napi_struct *napi, int budget)
3134 {
3135         int work_done = 0;
3136         u8 cos;
3137         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3138                                                  napi);
3139         struct bnx2x *bp = fp->bp;
3140
3141         while (1) {
3142 #ifdef BNX2X_STOP_ON_ERROR
3143                 if (unlikely(bp->panic)) {
3144                         napi_complete(napi);
3145                         return 0;
3146                 }
3147 #endif
3148                 if (!bnx2x_fp_lock_napi(fp))
3149                         return work_done;
3150
3151                 for_each_cos_in_tx_queue(fp, cos)
3152                         if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3153                                 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
3154
3155                 if (bnx2x_has_rx_work(fp)) {
3156                         work_done += bnx2x_rx_int(fp, budget - work_done);
3157
3158                         /* must not complete if we consumed full budget */
3159                         if (work_done >= budget) {
3160                                 bnx2x_fp_unlock_napi(fp);
3161                                 break;
3162                         }
3163                 }
3164
3165                 /* Fall out from the NAPI loop if needed */
3166                 if (!bnx2x_fp_unlock_napi(fp) &&
3167                     !(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3168
3169                         /* No need to update SB for FCoE L2 ring as long as
3170                          * it's connected to the default SB and the SB
3171                          * has been updated when NAPI was scheduled.
3172                          */
3173                         if (IS_FCOE_FP(fp)) {
3174                                 napi_complete(napi);
3175                                 break;
3176                         }
3177                         bnx2x_update_fpsb_idx(fp);
3178                         /* bnx2x_has_rx_work() reads the status block,
3179                          * thus we need to ensure that status block indices
3180                          * have been actually read (bnx2x_update_fpsb_idx)
3181                          * prior to this check (bnx2x_has_rx_work) so that
3182                          * we won't write the "newer" value of the status block
3183                          * to IGU (if there was a DMA right after
3184                          * bnx2x_has_rx_work and if there is no rmb, the memory
3185                          * reading (bnx2x_update_fpsb_idx) may be postponed
3186                          * to right before bnx2x_ack_sb). In this case there
3187                          * will never be another interrupt until there is
3188                          * another update of the status block, while there
3189                          * is still unhandled work.
3190                          */
3191                         rmb();
3192
3193                         if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3194                                 napi_complete(napi);
3195                                 /* Re-enable interrupts */
3196                                 DP(NETIF_MSG_RX_STATUS,
3197                                    "Update index to %d\n", fp->fp_hc_idx);
3198                                 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3199                                              le16_to_cpu(fp->fp_hc_idx),
3200                                              IGU_INT_ENABLE, 1);
3201                                 break;
3202                         }
3203                 }
3204         }
3205
3206         return work_done;
3207 }
3208
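/* Illustrative sketch of the budget handling above (hypothetical numbers):
 * with a budget of NAPI_POLL_WEIGHT (64), a burst of 100 received frames
 * makes bnx2x_rx_int() stop at work_done == 64; bnx2x_poll() then returns
 * 64 without calling napi_complete(), so the NAPI core polls this queue
 * again. Only once Rx and Tx work drains below the budget does the loop
 * ack the status block with IGU_INT_ENABLE and re-arm the interrupt.
 */
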
3209 #ifdef CONFIG_NET_RX_BUSY_POLL
3210 /* must be called with local_bh_disable()d */
3211 int bnx2x_low_latency_recv(struct napi_struct *napi)
3212 {
3213         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3214                                                  napi);
3215         struct bnx2x *bp = fp->bp;
3216         int found = 0;
3217
3218         if ((bp->state == BNX2X_STATE_CLOSED) ||
3219             (bp->state == BNX2X_STATE_ERROR) ||
3220             (bp->flags & (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG)))
3221                 return LL_FLUSH_FAILED;
3222
3223         if (!bnx2x_fp_lock_poll(fp))
3224                 return LL_FLUSH_BUSY;
3225
3226         if (bnx2x_has_rx_work(fp))
3227                 found = bnx2x_rx_int(fp, 4);
3228
3229         bnx2x_fp_unlock_poll(fp);
3230
3231         return found;
3232 }
3233 #endif
3234
3235 /* We split the first BD into a headers BD and a data BD
3236  * to ease the pain of our fellow microcode engineers;
3237  * we use one DMA mapping for both BDs.
3238  */
3239 static u16 bnx2x_tx_split(struct bnx2x *bp,
3240                           struct bnx2x_fp_txdata *txdata,
3241                           struct sw_tx_bd *tx_buf,
3242                           struct eth_tx_start_bd **tx_bd, u16 hlen,
3243                           u16 bd_prod)
3244 {
3245         struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3246         struct eth_tx_bd *d_tx_bd;
3247         dma_addr_t mapping;
3248         int old_len = le16_to_cpu(h_tx_bd->nbytes);
3249
3250         /* first fix first BD */
3251         h_tx_bd->nbytes = cpu_to_le16(hlen);
3252
3253         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3254            h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
3255
3256         /* now get a new data BD
3257          * (after the pbd) and fill it */
3258         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3259         d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3260
3261         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3262                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3263
3264         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3265         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3266         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3267
3268         /* this marks the BD as one that has no individual mapping */
3269         tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3270
3271         DP(NETIF_MSG_TX_QUEUED,
3272            "TSO split data size is %d (%x:%x)\n",
3273            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3274
3275         /* update tx_bd */
3276         *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3277
3278         return bd_prod;
3279 }
3280
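/* Illustrative sketch of the split above (hypothetical sizes): for a TSO
 * skb whose linear part is 1514 bytes with 66 bytes of headers
 * (14 ETH + 20 IP + 32 TCP w/ options), bnx2x_tx_split() produces
 *
 *	h_tx_bd->nbytes = 66;                   headers only
 *	d_tx_bd->addr_hi/lo = mapping + 66;     new data BD, same mapping
 *	d_tx_bd->nbytes = 1514 - 66 = 1448;     payload
 *	tx_buf->flags |= BNX2X_TSO_SPLIT_BD;    data BD has no own mapping
 *
 * so both BDs reference the single dma_map_single() mapping of the linear
 * data and only the start BD is unmapped on completion.
 */
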
3281 #define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3282 #define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
3283 static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
3284 {
3285         __sum16 tsum = (__force __sum16) csum;
3286
3287         if (fix > 0)
3288                 tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3289                                   csum_partial(t_header - fix, fix, 0)));
3290
3291         else if (fix < 0)
3292                 tsum = ~csum_fold(csum_add((__force __wsum) csum,
3293                                   csum_partial(t_header, -fix, 0)));
3294
3295         return bswab16(tsum);
3296 }
3297
3298 static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
3299 {
3300         u32 rc;
3301         __u8 prot = 0;
3302         __be16 protocol;
3303
3304         if (skb->ip_summed != CHECKSUM_PARTIAL)
3305                 return XMIT_PLAIN;
3306
3307         protocol = vlan_get_protocol(skb);
3308         if (protocol == htons(ETH_P_IPV6)) {
3309                 rc = XMIT_CSUM_V6;
3310                 prot = ipv6_hdr(skb)->nexthdr;
3311         } else {
3312                 rc = XMIT_CSUM_V4;
3313                 prot = ip_hdr(skb)->protocol;
3314         }
3315
3316         if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3317                 if (inner_ip_hdr(skb)->version == 6) {
3318                         rc |= XMIT_CSUM_ENC_V6;
3319                         if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3320                                 rc |= XMIT_CSUM_TCP;
3321                 } else {
3322                         rc |= XMIT_CSUM_ENC_V4;
3323                         if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
3324                                 rc |= XMIT_CSUM_TCP;
3325                 }
3326         }
3327         if (prot == IPPROTO_TCP)
3328                 rc |= XMIT_CSUM_TCP;
3329
3330         if (skb_is_gso(skb)) {
3331                 if (skb_is_gso_v6(skb)) {
3332                         rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
3333                         if (rc & XMIT_CSUM_ENC)
3334                                 rc |= XMIT_GSO_ENC_V6;
3335                 } else {
3336                         rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
3337                         if (rc & XMIT_CSUM_ENC)
3338                                 rc |= XMIT_GSO_ENC_V4;
3339                 }
3340         }
3341
3342         return rc;
3343 }
3344
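/* Illustrative sketch of the flag composition above (hypothetical skb): a
 * CHECKSUM_PARTIAL TCP/IPv4 skb with GSO and no encapsulation yields
 *
 *	xmit_type = XMIT_CSUM_V4 | XMIT_CSUM_TCP | XMIT_GSO_V4;
 *
 * an skb with ip_summed != CHECKSUM_PARTIAL yields just XMIT_PLAIN, and a
 * tunnelled TCP skb on a non-E1x chip additionally picks up the
 * XMIT_CSUM_ENC_V4/V6 and XMIT_GSO_ENC_V4/V6 bits.
 */
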
3345 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3346 /* check if packet requires linearization (packet is too fragmented)
3347    no need to check fragmentation if page size > 8K (there will be no
3348    violation of FW restrictions) */
3349 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3350                              u32 xmit_type)
3351 {
3352         int to_copy = 0;
3353         int hlen = 0;
3354         int first_bd_sz = 0;
3355
3356         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3357         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
3358
3359                 if (xmit_type & XMIT_GSO) {
3360                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3361                         /* Check if LSO packet needs to be copied:
3362                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
3363                         int wnd_size = MAX_FETCH_BD - 3;
3364                         /* Number of windows to check */
3365                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3366                         int wnd_idx = 0;
3367                         int frag_idx = 0;
3368                         u32 wnd_sum = 0;
3369
3370                         /* Headers length */
3371                         hlen = (int)(skb_transport_header(skb) - skb->data) +
3372                                 tcp_hdrlen(skb);
3373
3374                         /* Amount of data (w/o headers) in the linear part of the SKB */
3375                         first_bd_sz = skb_headlen(skb) - hlen;
3376
3377                         wnd_sum  = first_bd_sz;
3378
3379                         /* Calculate the first sum - it's special */
3380                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3381                                 wnd_sum +=
3382                                         skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
3383
3384                         /* If there was data in the linear part of the skb - check it */
3385                         if (first_bd_sz > 0) {
3386                                 if (unlikely(wnd_sum < lso_mss)) {
3387                                         to_copy = 1;
3388                                         goto exit_lbl;
3389                                 }
3390
3391                                 wnd_sum -= first_bd_sz;
3392                         }
3393
3394                         /* Others are easier: run through the frag list and
3395                            check all windows */
3396                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3397                                 wnd_sum +=
3398                           skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
3399
3400                                 if (unlikely(wnd_sum < lso_mss)) {
3401                                         to_copy = 1;
3402                                         break;
3403                                 }
3404                                 wnd_sum -=
3405                                         skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
3406                         }
3407                 } else {
3408                         /* in the non-LSO case a too-fragmented packet should
3409                            always be linearized */
3410                         to_copy = 1;
3411                 }
3412         }
3413
3414 exit_lbl:
3415         if (unlikely(to_copy))
3416                 DP(NETIF_MSG_TX_QUEUED,
3417                    "Linearization IS REQUIRED for %s packet. num_frags %d  hlen %d  first_bd_sz %d\n",
3418                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3419                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3420
3421         return to_copy;
3422 }
3423 #endif
3424
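/* Illustrative sketch of the window check above (hypothetical sizes): the
 * FW fetches at most MAX_FETCH_BD BDs per packet, three of which are the
 * headers BD, the PBD and the last BD, so every run of (MAX_FETCH_BD - 3)
 * consecutive data BDs must cover at least gso_size bytes. With a window
 * of, say, 10 BDs and an MSS of 1460, ten consecutive 100-byte frags sum
 * to only 1000 bytes, the check fails and the skb gets linearized.
 */
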
3425 static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
3426                                  u32 xmit_type)
3427 {
3428         struct ipv6hdr *ipv6;
3429
3430         *parsing_data |= (skb_shinfo(skb)->gso_size <<
3431                               ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
3432                               ETH_TX_PARSE_BD_E2_LSO_MSS;
3433
3434         if (xmit_type & XMIT_GSO_ENC_V6)
3435                 ipv6 = inner_ipv6_hdr(skb);
3436         else if (xmit_type & XMIT_GSO_V6)
3437                 ipv6 = ipv6_hdr(skb);
3438         else
3439                 ipv6 = NULL;
3440
3441         if (ipv6 && ipv6->nexthdr == NEXTHDR_IPV6)
3442                 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
3443 }
3444
3445 /**
3446  * bnx2x_set_pbd_gso - update PBD in GSO case.
3447  *
3448  * @skb:        packet skb
3449  * @pbd:        parse BD
3450  * @xmit_type:  xmit flags
3451  */
3452 static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3453                               struct eth_tx_parse_bd_e1x *pbd,
3454                               struct eth_tx_start_bd *tx_start_bd,
3455                               u32 xmit_type)
3456 {
3457         pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
3458         pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
3459         pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
3460
3461         if (xmit_type & XMIT_GSO_V4) {
3462                 pbd->ip_id = bswab16(ip_hdr(skb)->id);
3463                 pbd->tcp_pseudo_csum =
3464                         bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3465                                                    ip_hdr(skb)->daddr,
3466                                                    0, IPPROTO_TCP, 0));
3467
3468                 /* GSO on 57710/57711 needs FW to calculate IP checksum */
3469                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
3470         } else {
3471                 pbd->tcp_pseudo_csum =
3472                         bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3473                                                  &ipv6_hdr(skb)->daddr,
3474                                                  0, IPPROTO_TCP, 0));
3475         }
3476
3477         pbd->global_data |=
3478                 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
3479 }
3480
3481 /**
3482  * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3483  *
3484  * @bp:                 driver handle
3485  * @skb:                packet skb
3486  * @parsing_data:       data to be updated
3487  * @xmit_type:          xmit flags
3488  *
3489  * 57712/578xx related, when skb has encapsulation
3490  */
3491 static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3492                                  u32 *parsing_data, u32 xmit_type)
3493 {
3494         *parsing_data |=
3495                 ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3496                 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3497                 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3498
3499         if (xmit_type & XMIT_CSUM_TCP) {
3500                 *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3501                         ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3502                         ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3503
3504                 return skb_inner_transport_header(skb) +
3505                         inner_tcp_hdrlen(skb) - skb->data;
3506         }
3507
3508         /* We support checksum offload for TCP and UDP only.
3509          * No need to pass the UDP header length - it's a constant.
3510          */
3511         return skb_inner_transport_header(skb) +
3512                 sizeof(struct udphdr) - skb->data;
3513 }
3514
3515 /**
3516  * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
3517  *
3518  * @bp:                 driver handle
3519  * @skb:                packet skb
3520  * @parsing_data:       data to be updated
3521  * @xmit_type:          xmit flags
3522  *
3523  * 57712/578xx related
3524  */
3525 static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3526                                 u32 *parsing_data, u32 xmit_type)
3527 {
3528         *parsing_data |=
3529                 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
3530                 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3531                 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3532
3533         if (xmit_type & XMIT_CSUM_TCP) {
3534                 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3535                         ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3536                         ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3537
3538                 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
3539         }
3540         /* We support checksum offload for TCP and UDP only.
3541          * No need to pass the UDP header length - it's a constant.
3542          */
3543         return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
3544 }
3545
3546 /* set FW indication according to inner or outer protocols if tunneled */
3547 static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3548                                struct eth_tx_start_bd *tx_start_bd,
3549                                u32 xmit_type)
3550 {
3551         tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3552
3553         if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
3554                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
3555
3556         if (!(xmit_type & XMIT_CSUM_TCP))
3557                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
3558 }
3559
3560 /**
3561  * bnx2x_set_pbd_csum - update PBD with checksum and return header length
3562  *
3563  * @bp:         driver handle
3564  * @skb:        packet skb
3565  * @pbd:        parse BD to be updated
3566  * @xmit_type:  xmit flags
3567  */
3568 static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3569                              struct eth_tx_parse_bd_e1x *pbd,
3570                              u32 xmit_type)
3571 {
3572         u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
3573
3574         /* for now NS flag is not used in Linux */
3575         pbd->global_data =
3576                 cpu_to_le16(hlen |
3577                             ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3578                              ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
3579
3580         pbd->ip_hlen_w = (skb_transport_header(skb) -
3581                         skb_network_header(skb)) >> 1;
3582
3583         hlen += pbd->ip_hlen_w;
3584
3585         /* We support checksum offload for TCP and UDP only */
3586         if (xmit_type & XMIT_CSUM_TCP)
3587                 hlen += tcp_hdrlen(skb) / 2;
3588         else
3589                 hlen += sizeof(struct udphdr) / 2;
3590
3591         pbd->total_hlen_w = cpu_to_le16(hlen);
3592         hlen = hlen * 2;
3593
3594         if (xmit_type & XMIT_CSUM_TCP) {
3595                 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
3596
3597         } else {
3598                 s8 fix = SKB_CS_OFF(skb); /* signed! */
3599
3600                 DP(NETIF_MSG_TX_QUEUED,
3601                    "hlen %d  fix %d  csum before fix %x\n",
3602                    le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3603
3604                 /* HW bug: fixup the CSUM */
3605                 pbd->tcp_pseudo_csum =
3606                         bnx2x_csum_fix(skb_transport_header(skb),
3607                                        SKB_CS(skb), fix);
3608
3609                 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3610                    pbd->tcp_pseudo_csum);
3611         }
3612
3613         return hlen;
3614 }
3615
3616 static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3617                                       struct eth_tx_parse_bd_e2 *pbd_e2,
3618                                       struct eth_tx_parse_2nd_bd *pbd2,
3619                                       u16 *global_data,
3620                                       u32 xmit_type)
3621 {
3622         u16 hlen_w = 0;
3623         u8 outerip_off, outerip_len = 0;
3624
3625         /* from outer IP to transport */
3626         hlen_w = (skb_inner_transport_header(skb) -
3627                   skb_network_header(skb)) >> 1;
3628
3629         /* transport len */
3630         hlen_w += inner_tcp_hdrlen(skb) >> 1;
3631
3632         pbd2->fw_ip_hdr_to_payload_w = hlen_w;
3633
3634         /* outer IP header info */
3635         if (xmit_type & XMIT_CSUM_V4) {
3636                 struct iphdr *iph = ip_hdr(skb);
3637                 u32 csum = (__force u32)(~iph->check) -
3638                            (__force u32)iph->tot_len -
3639                            (__force u32)iph->frag_off;
3640
3641                 pbd2->fw_ip_csum_wo_len_flags_frag =
3642                         bswab16(csum_fold((__force __wsum)csum));
3643         } else {
3644                 pbd2->fw_ip_hdr_to_payload_w =
3645                         hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
3646         }
3647
3648         pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3649
3650         pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3651
3652         if (xmit_type & XMIT_GSO_V4) {
3653                 pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
3654
3655                 pbd_e2->data.tunnel_data.pseudo_csum =
3656                         bswab16(~csum_tcpudp_magic(
3657                                         inner_ip_hdr(skb)->saddr,
3658                                         inner_ip_hdr(skb)->daddr,
3659                                         0, IPPROTO_TCP, 0));
3660
3661                 outerip_len = ip_hdr(skb)->ihl << 1;
3662         } else {
3663                 pbd_e2->data.tunnel_data.pseudo_csum =
3664                         bswab16(~csum_ipv6_magic(
3665                                         &inner_ipv6_hdr(skb)->saddr,
3666                                         &inner_ipv6_hdr(skb)->daddr,
3667                                         0, IPPROTO_TCP, 0));
3668         }
3669
3670         outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3671
3672         *global_data |=
3673                 outerip_off |
3674                 (!!(xmit_type & XMIT_CSUM_V6) <<
3675                         ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT) |
3676                 (outerip_len <<
3677                         ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3678                 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3679                         ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
3680
3681         if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3682                 SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3683                 pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3684         }
3685 }
3686
3687 /* called with netif_tx_lock
3688  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3689  * netif_wake_queue()
3690  */
3691 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3692 {
3693         struct bnx2x *bp = netdev_priv(dev);
3694
3695         struct netdev_queue *txq;
3696         struct bnx2x_fp_txdata *txdata;
3697         struct sw_tx_bd *tx_buf;
3698         struct eth_tx_start_bd *tx_start_bd, *first_bd;
3699         struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
3700         struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
3701         struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
3702         struct eth_tx_parse_2nd_bd *pbd2 = NULL;
3703         u32 pbd_e2_parsing_data = 0;
3704         u16 pkt_prod, bd_prod;
3705         int nbd, txq_index;
3706         dma_addr_t mapping;
3707         u32 xmit_type = bnx2x_xmit_type(bp, skb);
3708         int i;
3709         u8 hlen = 0;
3710         __le16 pkt_size = 0;
3711         struct ethhdr *eth;
3712         u8 mac_type = UNICAST_ADDRESS;
3713
3714 #ifdef BNX2X_STOP_ON_ERROR
3715         if (unlikely(bp->panic))
3716                 return NETDEV_TX_BUSY;
3717 #endif
3718
3719         txq_index = skb_get_queue_mapping(skb);
3720         txq = netdev_get_tx_queue(dev, txq_index);
3721
3722         BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
3723
3724         txdata = &bp->bnx2x_txq[txq_index];
3725
3726         /* enable this debug print to view the transmission queue being used
3727         DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
3728            txq_index, fp_index, txdata_index); */
3729
3730         /* enable this debug print to view the transmission details
3731         DP(NETIF_MSG_TX_QUEUED,
3732            "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
3733            txdata->cid, fp_index, txdata_index, txdata, fp); */
3734
3735         if (unlikely(bnx2x_tx_avail(bp, txdata) <
3736                         skb_shinfo(skb)->nr_frags +
3737                         BDS_PER_TX_PKT +
3738                         NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
3739                 /* Handle special storage cases separately */
3740                 if (txdata->tx_ring_size == 0) {
3741                         struct bnx2x_eth_q_stats *q_stats =
3742                                 bnx2x_fp_qstats(bp, txdata->parent_fp);
3743                         q_stats->driver_filtered_tx_pkt++;
3744                         dev_kfree_skb(skb);
3745                         return NETDEV_TX_OK;
3746                 }
3747                 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3748                 netif_tx_stop_queue(txq);
3749                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
3750
3751                 return NETDEV_TX_BUSY;
3752         }
3753
3754         DP(NETIF_MSG_TX_QUEUED,
3755            "queue[%d]: SKB: summed %x  protocol %x protocol(%x,%x) gso type %x  xmit_type %x len %d\n",
3756            txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
3757            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3758            skb->len);
3759
3760         eth = (struct ethhdr *)skb->data;
3761
3762         /* set flag according to packet type (UNICAST_ADDRESS is default) */
3763         if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3764                 if (is_broadcast_ether_addr(eth->h_dest))
3765                         mac_type = BROADCAST_ADDRESS;
3766                 else
3767                         mac_type = MULTICAST_ADDRESS;
3768         }
3769
3770 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3771         /* First, check if we need to linearize the skb (due to FW
3772            restrictions). No need to check fragmentation if page size > 8K
3773            (there will be no violation of FW restrictions) */
3774         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3775                 /* Statistics of linearization */
3776                 bp->lin_cnt++;
3777                 if (skb_linearize(skb) != 0) {
3778                         DP(NETIF_MSG_TX_QUEUED,
3779                            "SKB linearization failed - silently dropping this SKB\n");
3780                         dev_kfree_skb_any(skb);
3781                         return NETDEV_TX_OK;
3782                 }
3783         }
3784 #endif
3785         /* Map skb linear data for DMA */
3786         mapping = dma_map_single(&bp->pdev->dev, skb->data,
3787                                  skb_headlen(skb), DMA_TO_DEVICE);
3788         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3789                 DP(NETIF_MSG_TX_QUEUED,
3790                    "SKB mapping failed - silently dropping this SKB\n");
3791                 dev_kfree_skb_any(skb);
3792                 return NETDEV_TX_OK;
3793         }
3794         /*
3795          * Please read carefully. First we use one BD which we mark as start,
3796          * then we have a parsing info BD (used for TSO or xsum),
3797          * and only then we have the rest of the TSO BDs.
3798          * (don't forget to mark the last one as last,
3799          * and to unmap only AFTER you write to the BD ...)
3800          * And above all, all pbd sizes are in words - NOT DWORDS!
3801          */
3802
3803         /* get current pkt produced now - advance it just before sending packet
3804          * since mapping of pages may fail and cause packet to be dropped
3805          */
3806         pkt_prod = txdata->tx_pkt_prod;
3807         bd_prod = TX_BD(txdata->tx_bd_prod);
3808
3809         /* get a tx_buf and first BD
3810          * tx_start_bd may be changed during SPLIT,
3811          * but first_bd will always stay first
3812          */
3813         tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3814         tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
3815         first_bd = tx_start_bd;
3816
3817         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
3818
3819         /* header nbd: indirectly zero other flags! */
3820         tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
3821
3822         /* remember the first BD of the packet */
3823         tx_buf->first_bd = txdata->tx_bd_prod;
3824         tx_buf->skb = skb;
3825         tx_buf->flags = 0;
3826
3827         DP(NETIF_MSG_TX_QUEUED,
3828            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
3829            pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
3830
3831         if (vlan_tx_tag_present(skb)) {
3832                 tx_start_bd->vlan_or_ethertype =
3833                     cpu_to_le16(vlan_tx_tag_get(skb));
3834                 tx_start_bd->bd_flags.as_bitfield |=
3835                     (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3836         } else {
3837                 /* when transmitting via a VF, the start BD must hold the
3838                  * ethertype for the FW to enforce it
3839                  */
3840                 if (IS_VF(bp))
3841                         tx_start_bd->vlan_or_ethertype =
3842                                 cpu_to_le16(ntohs(eth->h_proto));
3843                 else
3844                         /* used by FW for packet accounting */
3845                         tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3846         }
3847
3848         nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3849
3850         /* turn on parsing and get a BD */
3851         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3852
3853         if (xmit_type & XMIT_CSUM)
3854                 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
3855
3856         if (!CHIP_IS_E1x(bp)) {
3857                 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
3858                 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3859
3860                 if (xmit_type & XMIT_CSUM_ENC) {
3861                         u16 global_data = 0;
3862
3863                         /* Set PBD in enc checksum offload case */
3864                         hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3865                                                       &pbd_e2_parsing_data,
3866                                                       xmit_type);
3867
3868                         /* turn on 2nd parsing and get a BD */
3869                         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3870
3871                         pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3872
3873                         memset(pbd2, 0, sizeof(*pbd2));
3874
3875                         pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3876                                 (skb_inner_network_header(skb) -
3877                                  skb->data) >> 1;
3878
3879                         if (xmit_type & XMIT_GSO_ENC)
3880                                 bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3881                                                           &global_data,
3882                                                           xmit_type);
3883
3884                         pbd2->global_data = cpu_to_le16(global_data);
3885
3886                         /* add additional parse BD indication to start BD */
3887                         SET_FLAG(tx_start_bd->general_data,
3888                                  ETH_TX_START_BD_PARSE_NBDS, 1);
3889                         /* set encapsulation flag in start BD */
3890                         SET_FLAG(tx_start_bd->general_data,
3891                                  ETH_TX_START_BD_TUNNEL_EXIST, 1);
3892                         nbd++;
3893                 } else if (xmit_type & XMIT_CSUM) {
3894                         /* Set PBD in checksum offload case w/o encapsulation */
3895                         hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3896                                                      &pbd_e2_parsing_data,
3897                                                      xmit_type);
3898                 }
3899
3900                 /* Add the MACs to the parsing BD if this is a VF or if
3901                  * Tx Switching is enabled.
3902                  */
3903                 if (IS_VF(bp)) {
3904                         /* override GRE parameters in BD */
3905                         bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3906                                               &pbd_e2->data.mac_addr.src_mid,
3907                                               &pbd_e2->data.mac_addr.src_lo,
3908                                               eth->h_source);
3909
3910                         bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3911                                               &pbd_e2->data.mac_addr.dst_mid,
3912                                               &pbd_e2->data.mac_addr.dst_lo,
3913                                               eth->h_dest);
3914                 } else if (bp->flags & TX_SWITCHING) {
3915                         bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3916                                               &pbd_e2->data.mac_addr.dst_mid,
3917                                               &pbd_e2->data.mac_addr.dst_lo,
3918                                               eth->h_dest);
3919                 }
3920
3921                 SET_FLAG(pbd_e2_parsing_data,
3922                          ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
3923         } else {
3924                 u16 global_data = 0;
3925                 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
3926                 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3927                 /* Set PBD in checksum offload case */
3928                 if (xmit_type & XMIT_CSUM)
3929                         hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
3930
3931                 SET_FLAG(global_data,
3932                          ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
3933                 pbd_e1x->global_data |= cpu_to_le16(global_data);
3934         }
3935
3936         /* Set up the data pointer of the first BD of the packet */
3937         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3938         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3939         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3940         pkt_size = tx_start_bd->nbytes;
3941
3942         DP(NETIF_MSG_TX_QUEUED,
3943            "first bd @%p  addr (%x:%x)  nbytes %d  flags %x  vlan %x\n",
3944            tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
3945            le16_to_cpu(tx_start_bd->nbytes),
3946            tx_start_bd->bd_flags.as_bitfield,
3947            le16_to_cpu(tx_start_bd->vlan_or_ethertype));
3948
3949         if (xmit_type & XMIT_GSO) {
3950
3951                 DP(NETIF_MSG_TX_QUEUED,
3952                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
3953                    skb->len, hlen, skb_headlen(skb),
3954                    skb_shinfo(skb)->gso_size);
3955
3956                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3957
3958                 if (unlikely(skb_headlen(skb) > hlen)) {
3959                         nbd++;
3960                         bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3961                                                  &tx_start_bd, hlen,
3962                                                  bd_prod);
3963                 }
3964                 if (!CHIP_IS_E1x(bp))
3965                         bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3966                                              xmit_type);
3967                 else
3968                         bnx2x_set_pbd_gso(skb, pbd_e1x, first_bd, xmit_type);
3969         }
3970
3971         /* Set the PBD's parsing_data field if not zero
3972          * (for chips newer than the 57711).
3973          */
3974         if (pbd_e2_parsing_data)
3975                 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3976
3977         tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3978
3979         /* Handle fragmented skb */
3980         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3981                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3982
3983                 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3984                                            skb_frag_size(frag), DMA_TO_DEVICE);
3985                 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3986                         unsigned int pkts_compl = 0, bytes_compl = 0;
3987
3988                         DP(NETIF_MSG_TX_QUEUED,
3989                            "Unable to map page - dropping packet...\n");
3990
3991                         /* we need to unmap all buffers already mapped
3992                          * for this SKB;
3993                          * first_bd->nbd needs to be properly updated
3994                          * before the call to bnx2x_free_tx_pkt
3995                          */
3996                         first_bd->nbd = cpu_to_le16(nbd);
3997                         bnx2x_free_tx_pkt(bp, txdata,
3998                                           TX_BD(txdata->tx_pkt_prod),
3999                                           &pkts_compl, &bytes_compl);
4000                         return NETDEV_TX_OK;
4001                 }
4002
4003                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4004                 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4005                 if (total_pkt_bd == NULL)
4006                         total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4007
4008                 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4009                 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
4010                 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
4011                 le16_add_cpu(&pkt_size, skb_frag_size(frag));
4012                 nbd++;
4013
4014                 DP(NETIF_MSG_TX_QUEUED,
4015                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
4016                    i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
4017                    le16_to_cpu(tx_data_bd->nbytes));
4018         }
4019
4020         DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
4021
4022         /* update with actual num BDs */
4023         first_bd->nbd = cpu_to_le16(nbd);
4024
4025         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4026
4027         /* now send a tx doorbell, counting the next-page BD
4028          * if the packet contains or ends with it
4029          */
4030         if (TX_BD_POFF(bd_prod) < nbd)
4031                 nbd++;
4032
4033         /* total_pkt_bytes should be set on the first data BD if
4034          * it's not an LSO packet and there is more than one
4035          * data BD. In this case pkt_size is limited by an MTU value.
4036          * However, we prefer to set it for an LSO packet as well (while we
4037          * don't have to) in order to save some CPU cycles in the non-LSO
4038          * case, where we care much more about them.
4039          */
4040         if (total_pkt_bd != NULL)
4041                 total_pkt_bd->total_pkt_bytes = pkt_size;
4042
4043         if (pbd_e1x)
4044                 DP(NETIF_MSG_TX_QUEUED,
4045                    "PBD (E1X) @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
4046                    pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
4047                    pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
4048                    pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
4049                     le16_to_cpu(pbd_e1x->total_hlen_w));
4050         if (pbd_e2)
4051                 DP(NETIF_MSG_TX_QUEUED,
4052                    "PBD (E2) @%p  dst %x %x %x src %x %x %x parsing_data %x\n",
4053                    pbd_e2,
4054                    pbd_e2->data.mac_addr.dst_hi,
4055                    pbd_e2->data.mac_addr.dst_mid,
4056                    pbd_e2->data.mac_addr.dst_lo,
4057                    pbd_e2->data.mac_addr.src_hi,
4058                    pbd_e2->data.mac_addr.src_mid,
4059                    pbd_e2->data.mac_addr.src_lo,
4060                    pbd_e2->parsing_data);
4061         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
4062
4063         netdev_tx_sent_queue(txq, skb->len);
4064
4065         skb_tx_timestamp(skb);
4066
4067         txdata->tx_pkt_prod++;
4068         /*
4069          * Make sure that the BD data is updated before updating the producer
4070          * since FW might read the BD right after the producer is updated.
4071          * This is only applicable for weak-ordered memory model archs such
4072          * as IA-64. The following barrier is also mandatory since the FW
4073          * assumes packets must have BDs.
4074          */
4075         wmb();
4076
4077         txdata->tx_db.data.prod += nbd;
4078         barrier();
4079
4080         DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
4081
4082         mmiowb();
4083
4084         txdata->tx_bd_prod += nbd;
4085
4086         if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
4087                 netif_tx_stop_queue(txq);
4088
4089                 /* The paired memory barrier is in bnx2x_tx_int(); we have to
4090                  * keep the ordering of set_bit() in netif_tx_stop_queue() and
4091                  * the read of fp->bd_tx_cons */
4092                 smp_mb();
4093
4094                 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
4095                 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
4096                         netif_tx_wake_queue(txq);
4097         }
4098         txdata->tx_pkt++;
4099
4100         return NETDEV_TX_OK;
4101 }
4102
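/* Illustrative sketch of the BD accounting above (hypothetical packet): a
 * non-GSO, checksum-offloaded skb with two page fragments on an E2/E3
 * chip is described by
 *
 *	start BD (linear data) -> parse BD (E2) -> frag BD -> frag BD
 *
 * nbd starts at 2 (start BD + parse BD) and reaches 4 after the frags are
 * mapped, so first_bd->nbd = 4; the doorbell producer then advances by
 * nbd, plus one more when the chain crosses a BD-page boundary and thus
 * also contains a next-page link BD (the TX_BD_POFF() test).
 */
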
4103 /**
4104  * bnx2x_setup_tc - routine to configure net_device for multi tc
4105  *
4106  * @dev: net device to configure
4107  * @num_tc: number of traffic classes to enable
4108  *
4109  * callback connected to the ndo_setup_tc function pointer
4110  */
4111 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
4112 {
4113         int cos, prio, count, offset;
4114         struct bnx2x *bp = netdev_priv(dev);
4115
4116         /* setup tc must be called under rtnl lock */
4117         ASSERT_RTNL();
4118
4119         /* no traffic classes requested; reset any existing mapping and return */
4120         if (!num_tc) {
4121                 netdev_reset_tc(dev);
4122                 return 0;
4123         }
4124
4125         /* requested to support too many traffic classes */
4126         if (num_tc > bp->max_cos) {
4127                 BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
4128                           num_tc, bp->max_cos);
4129                 return -EINVAL;
4130         }
4131
4132         /* declare amount of supported traffic classes */
4133         if (netdev_set_num_tc(dev, num_tc)) {
4134                 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
4135                 return -EINVAL;
4136         }
4137
4138         /* configure priority to traffic class mapping */
4139         for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
4140                 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
4141                 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4142                    "mapping priority %d to tc %d\n",
4143                    prio, bp->prio_to_cos[prio]);
4144         }
4145
4146         /* Use this configuration to differentiate tc0 from other COSes.
4147            This can be used for ETS or PFC, and saves the effort of setting
4148            up a multi-class queueing discipline or negotiating DCBX with a switch:
4149         netdev_set_prio_tc_map(dev, 0, 0);
4150         DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
4151         for (prio = 1; prio < 16; prio++) {
4152                 netdev_set_prio_tc_map(dev, prio, 1);
4153                 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
4154         } */
4155
4156         /* configure traffic class to transmission queue mapping */
4157         for (cos = 0; cos < bp->max_cos; cos++) {
4158                 count = BNX2X_NUM_ETH_QUEUES(bp);
4159                 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
4160                 netdev_set_tc_queue(dev, cos, count, offset);
4161                 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4162                    "mapping tc %d to offset %d count %d\n",
4163                    cos, offset, count);
4164         }
4165
4166         return 0;
4167 }
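/* Illustrative example of the mapping set up by bnx2x_setup_tc(): with
 * num_tc = 2 and four ETH queues (and assuming BNX2X_NUM_NON_CNIC_QUEUES()
 * equals the ETH queue count), tc 0 is backed by tx queues 0-3
 * (offset 0, count 4) and tc 1 by tx queues 4-7 (offset 4, count 4),
 * while each priority is steered to the tc given by bp->prio_to_cos[].
 */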
4168
4169 /* called with rtnl_lock */
4170 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4171 {
4172         struct sockaddr *addr = p;
4173         struct bnx2x *bp = netdev_priv(dev);
4174         int rc = 0;
4175
4176         if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
4177                 BNX2X_ERR("Requested MAC address is not valid\n");
4178                 return -EINVAL;
4179         }
4180
4181         if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
4182             !is_zero_ether_addr(addr->sa_data)) {
4183                 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
4184                 return -EINVAL;
4185         }
4186
4187         if (netif_running(dev))  {
4188                 rc = bnx2x_set_eth_mac(bp, false);
4189                 if (rc)
4190                         return rc;
4191         }
4192
4193         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4194
4195         if (netif_running(dev))
4196                 rc = bnx2x_set_eth_mac(bp, true);
4197
4198         return rc;
4199 }
4200
4201 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4202 {
4203         union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4204         struct bnx2x_fastpath *fp = &bp->fp[fp_index];
4205         u8 cos;
4206
4207         /* Common */
4208
4209         if (IS_FCOE_IDX(fp_index)) {
4210                 memset(sb, 0, sizeof(union host_hc_status_block));
4211                 fp->status_blk_mapping = 0;
4212         } else {
4213                 /* status blocks */
4214                 if (!CHIP_IS_E1x(bp))
4215                         BNX2X_PCI_FREE(sb->e2_sb,
4216                                        bnx2x_fp(bp, fp_index,
4217                                                 status_blk_mapping),
4218                                        sizeof(struct host_hc_status_block_e2));
4219                 else
4220                         BNX2X_PCI_FREE(sb->e1x_sb,
4221                                        bnx2x_fp(bp, fp_index,
4222                                                 status_blk_mapping),
4223                                        sizeof(struct host_hc_status_block_e1x));
4224         }
4225
4226         /* Rx */
4227         if (!skip_rx_queue(bp, fp_index)) {
4228                 bnx2x_free_rx_bds(fp);
4229
4230                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4231                 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4232                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4233                                bnx2x_fp(bp, fp_index, rx_desc_mapping),
4234                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
4235
4236                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4237                                bnx2x_fp(bp, fp_index, rx_comp_mapping),
4238                                sizeof(struct eth_fast_path_rx_cqe) *
4239                                NUM_RCQ_BD);
4240
4241                 /* SGE ring */
4242                 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4243                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4244                                bnx2x_fp(bp, fp_index, rx_sge_mapping),
4245                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4246         }
4247
4248         /* Tx */
4249         if (!skip_tx_queue(bp, fp_index)) {
4250                 /* fastpath tx rings: tx_buf tx_desc */
4251                 for_each_cos_in_tx_queue(fp, cos) {
4252                         struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4253
4254                         DP(NETIF_MSG_IFDOWN,
4255                            "freeing tx memory of fp %d cos %d cid %d\n",
4256                            fp_index, cos, txdata->cid);
4257
4258                         BNX2X_FREE(txdata->tx_buf_ring);
4259                         BNX2X_PCI_FREE(txdata->tx_desc_ring,
4260                                 txdata->tx_desc_mapping,
4261                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4262                 }
4263         }
4264         /* end of fastpath */
4265 }
4266
4267 static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
4268 {
4269         int i;
4270         for_each_cnic_queue(bp, i)
4271                 bnx2x_free_fp_mem_at(bp, i);
4272 }
4273
4274 void bnx2x_free_fp_mem(struct bnx2x *bp)
4275 {
4276         int i;
4277         for_each_eth_queue(bp, i)
4278                 bnx2x_free_fp_mem_at(bp, i);
4279 }
4280
4281 static void set_sb_shortcuts(struct bnx2x *bp, int index)
4282 {
4283         union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
4284         if (!CHIP_IS_E1x(bp)) {
4285                 bnx2x_fp(bp, index, sb_index_values) =
4286                         (__le16 *)status_blk.e2_sb->sb.index_values;
4287                 bnx2x_fp(bp, index, sb_running_index) =
4288                         (__le16 *)status_blk.e2_sb->sb.running_index;
4289         } else {
4290                 bnx2x_fp(bp, index, sb_index_values) =
4291                         (__le16 *)status_blk.e1x_sb->sb.index_values;
4292                 bnx2x_fp(bp, index, sb_running_index) =
4293                         (__le16 *)status_blk.e1x_sb->sb.running_index;
4294         }
4295 }
4296
4297 /* Returns the number of actually allocated BDs */
4298 static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4299                               int rx_ring_size)
4300 {
4301         struct bnx2x *bp = fp->bp;
4302         u16 ring_prod, cqe_ring_prod;
4303         int i, failure_cnt = 0;
4304
4305         fp->rx_comp_cons = 0;
4306         cqe_ring_prod = ring_prod = 0;
4307
4308         /* This routine is called only during fp init, so
4309          * fp->eth_q_stats.rx_skb_alloc_failed = 0
4310          */
4311         for (i = 0; i < rx_ring_size; i++) {
4312                 if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
4313                         failure_cnt++;
4314                         continue;
4315                 }
4316                 ring_prod = NEXT_RX_IDX(ring_prod);
4317                 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4318                 WARN_ON(ring_prod <= (i - failure_cnt));
4319         }
4320
4321         if (failure_cnt)
4322                 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4323                           i - failure_cnt, fp->index);
4324
4325         fp->rx_bd_prod = ring_prod;
4326         /* Limit the CQE producer by the CQE ring size */
4327         fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4328                                cqe_ring_prod);
4329         fp->rx_pkt = fp->rx_calls = 0;
4330
4331         bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
4332
4333         return i - failure_cnt;
4334 }
4335
4336 static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4337 {
4338         int i;
4339
4340         for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4341                 struct eth_rx_cqe_next_page *nextpg;
4342
4343                 nextpg = (struct eth_rx_cqe_next_page *)
4344                         &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4345                 nextpg->addr_hi =
4346                         cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4347                                    BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4348                 nextpg->addr_lo =
4349                         cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4350                                    BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4351         }
4352 }
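/* bnx2x_set_next_page_rx_cq() chains the RCQ pages: the last CQE slot of
 * each page is turned into a next-page element pointing at the following
 * page, and the final page (i == NUM_RCQ_RINGS, i % NUM_RCQ_RINGS == 0)
 * wraps back to the first, closing the ring.
 */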
4353
4354 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4355 {
4356         union host_hc_status_block *sb;
4357         struct bnx2x_fastpath *fp = &bp->fp[index];
4358         int ring_size = 0;
4359         u8 cos;
4360         int rx_ring_size = 0;
4361
4362         if (!bp->rx_ring_size &&
4363             (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
4364                 rx_ring_size = MIN_RX_SIZE_NONTPA;
4365                 bp->rx_ring_size = rx_ring_size;
4366         } else if (!bp->rx_ring_size) {
4367                 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4368
4369                 if (CHIP_IS_E3(bp)) {
4370                         u32 cfg = SHMEM_RD(bp,
4371                                            dev_info.port_hw_config[BP_PORT(bp)].
4372                                            default_cfg);
4373
4374                         /* Decrease ring size for 1G functions */
4375                         if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4376                             PORT_HW_CFG_NET_SERDES_IF_SGMII)
4377                                 rx_ring_size /= 10;
4378                 }
4379
4380                 /* allocate at least number of buffers required by FW */
4381                 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4382                                      MIN_RX_SIZE_TPA, rx_ring_size);
4383
4384                 bp->rx_ring_size = rx_ring_size;
4385         } else /* if rx_ring_size specified - use it */
4386                 rx_ring_size = bp->rx_ring_size;
4387
4388         DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
4389
4390         /* Common */
4391         sb = &bnx2x_fp(bp, index, status_blk);
4392
4393         if (!IS_FCOE_IDX(index)) {
4394                 /* status blocks */
4395                 if (!CHIP_IS_E1x(bp)) {
4396                         sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4397                                                     sizeof(struct host_hc_status_block_e2));
4398                         if (!sb->e2_sb)
4399                                 goto alloc_mem_err;
4400                 } else {
4401                         sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4402                                                      sizeof(struct host_hc_status_block_e1x));
4403                         if (!sb->e1x_sb)
4404                                 goto alloc_mem_err;
4405                 }
4406         }
4407
4408         /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4409          * set shortcuts for it.
4410          */
4411         if (!IS_FCOE_IDX(index))
4412                 set_sb_shortcuts(bp, index);
4413
4414         /* Tx */
4415         if (!skip_tx_queue(bp, index)) {
4416                 /* fastpath tx rings: tx_buf tx_desc */
4417                 for_each_cos_in_tx_queue(fp, cos) {
4418                         struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4419
4420                         DP(NETIF_MSG_IFUP,
4421                            "allocating tx memory of fp %d cos %d\n",
4422                            index, cos);
4423
4424                         txdata->tx_buf_ring = kcalloc(NUM_TX_BD,
4425                                                       sizeof(struct sw_tx_bd),
4426                                                       GFP_KERNEL);
4427                         if (!txdata->tx_buf_ring)
4428                                 goto alloc_mem_err;
4429                         txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
4430                                                                sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4431                         if (!txdata->tx_desc_ring)
4432                                 goto alloc_mem_err;
4433                 }
4434         }
4435
4436         /* Rx */
4437         if (!skip_rx_queue(bp, index)) {
4438                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4439                 bnx2x_fp(bp, index, rx_buf_ring) =
4440                         kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL);
4441                 if (!bnx2x_fp(bp, index, rx_buf_ring))
4442                         goto alloc_mem_err;
4443                 bnx2x_fp(bp, index, rx_desc_ring) =
4444                         BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
4445                                         sizeof(struct eth_rx_bd) * NUM_RX_BD);
4446                 if (!bnx2x_fp(bp, index, rx_desc_ring))
4447                         goto alloc_mem_err;
4448
4449                 /* Seed all CQEs by 1s */
4450                 bnx2x_fp(bp, index, rx_comp_ring) =
4451                         BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
4452                                          sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);
4453                 if (!bnx2x_fp(bp, index, rx_comp_ring))
4454                         goto alloc_mem_err;
4455
4456                 /* SGE ring */
4457                 bnx2x_fp(bp, index, rx_page_ring) =
4458                         kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page),
4459                                 GFP_KERNEL);
4460                 if (!bnx2x_fp(bp, index, rx_page_ring))
4461                         goto alloc_mem_err;
4462                 bnx2x_fp(bp, index, rx_sge_ring) =
4463                         BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
4464                                         BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4465                 if (!bnx2x_fp(bp, index, rx_sge_ring))
4466                         goto alloc_mem_err;
4467                 /* RX BD ring */
4468                 bnx2x_set_next_page_rx_bd(fp);
4469
4470                 /* CQ ring */
4471                 bnx2x_set_next_page_rx_cq(fp);
4472
4473                 /* BDs */
4474                 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4475                 if (ring_size < rx_ring_size)
4476                         goto alloc_mem_err;
4477         }
4478
4479         return 0;
4480
4481 /* handles low memory cases */
4482 alloc_mem_err:
4483         BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4484                                                 index, ring_size);
4485         /* FW will drop all packets if the queue is not big enough;
4486          * in that case we disable the queue.
4487          * The minimum size differs for OOO, TPA and non-TPA queues.
4488          */
4489         if (ring_size < (fp->disable_tpa ?
4490                                 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
4491                         /* release memory allocated for this queue */
4492                         bnx2x_free_fp_mem_at(bp, index);
4493                         return -ENOMEM;
4494         }
4495         return 0;
4496 }
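/* To summarize the fallback above: if the ring came out smaller than
 * requested but still meets the FW minimum (MIN_RX_SIZE_TPA or
 * MIN_RX_SIZE_NONTPA), the queue is kept with the reduced ring and 0 is
 * returned; only when even the minimum cannot be met is the queue's memory
 * released and -ENOMEM returned.
 */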
4497
4498 static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
4499 {
4500         if (!NO_FCOE(bp))
4501                 /* FCoE */
4502                 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
4503                         /* we will fail the load process instead of
4504                          * marking NO_FCOE_FLAG
4505                          */
4506                         return -ENOMEM;
4507
4508         return 0;
4509 }
4510
4511 static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
4512 {
4513         int i;
4514
4515         /* 1. Allocate FP for leading - fatal if error
4516          * 2. Allocate RSS - fix number of queues if error
4517          */
4518
4519         /* leading */
4520         if (bnx2x_alloc_fp_mem_at(bp, 0))
4521                 return -ENOMEM;
4522
4523         /* RSS */
4524         for_each_nondefault_eth_queue(bp, i)
4525                 if (bnx2x_alloc_fp_mem_at(bp, i))
4526                         break;
4527
4528         /* handle memory failures */
4529         if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4530                 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4531
4532                 WARN_ON(delta < 0);
4533                 bnx2x_shrink_eth_fp(bp, delta);
4534                 if (CNIC_SUPPORT(bp))
4535                         /* move non-eth FPs next to the last eth FP;
4536                          * must be done in this order:
4537                          * FCOE_IDX < FWD_IDX < OOO_IDX
4538                          */
4539
4540                         /* move the FCoE fp even if NO_FCOE_FLAG is on */
4541                         bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4542                 bp->num_ethernet_queues -= delta;
4543                 bp->num_queues = bp->num_ethernet_queues +
4544                                  bp->num_cnic_queues;
4545                 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4546                           bp->num_queues + delta, bp->num_queues);
4547         }
4548
4549         return 0;
4550 }
4551
4552 void bnx2x_free_mem_bp(struct bnx2x *bp)
4553 {
4554         int i;
4555
4556         for (i = 0; i < bp->fp_array_size; i++)
4557                 kfree(bp->fp[i].tpa_info);
4558         kfree(bp->fp);
4559         kfree(bp->sp_objs);
4560         kfree(bp->fp_stats);
4561         kfree(bp->bnx2x_txq);
4562         kfree(bp->msix_table);
4563         kfree(bp->ilt);
4564 }
4565
4566 int bnx2x_alloc_mem_bp(struct bnx2x *bp)
4567 {
4568         struct bnx2x_fastpath *fp;
4569         struct msix_entry *tbl;
4570         struct bnx2x_ilt *ilt;
4571         int msix_table_size = 0;
4572         int fp_array_size, txq_array_size;
4573         int i;
4574
4575         /*
4576          * The biggest MSI-X table we might need is the maximum number of fast
4577          * path IGU SBs plus the default SB (PF only).
4578          */
4579         msix_table_size = bp->igu_sb_cnt;
4580         if (IS_PF(bp))
4581                 msix_table_size++;
4582         BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
4583
4584         /* fp array: RSS plus CNIC related L2 queues */
4585         fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
4586         bp->fp_array_size = fp_array_size;
4587         BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
4588
4589         fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
4590         if (!fp)
4591                 goto alloc_err;
4592         for (i = 0; i < bp->fp_array_size; i++) {
4593                 fp[i].tpa_info =
4594                         kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
4595                                 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4596                 if (!(fp[i].tpa_info))
4597                         goto alloc_err;
4598         }
4599
4600         bp->fp = fp;
4601
4602         /* allocate sp objs */
4603         bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
4604                               GFP_KERNEL);
4605         if (!bp->sp_objs)
4606                 goto alloc_err;
4607
4608         /* allocate fp_stats */
4609         bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
4610                                GFP_KERNEL);
4611         if (!bp->fp_stats)
4612                 goto alloc_err;
4613
4614         /* Allocate memory for the transmission queues array */
4615         txq_array_size =
4616                 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
4617         BNX2X_DEV_INFO("txq_array_size %d\n", txq_array_size);
4618
4619         bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4620                                 GFP_KERNEL);
4621         if (!bp->bnx2x_txq)
4622                 goto alloc_err;
4623
4624         /* msix table */
4625         tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
4626         if (!tbl)
4627                 goto alloc_err;
4628         bp->msix_table = tbl;
4629
4630         /* ilt */
4631         ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4632         if (!ilt)
4633                 goto alloc_err;
4634         bp->ilt = ilt;
4635
4636         return 0;
4637 alloc_err:
4638         bnx2x_free_mem_bp(bp);
4639         return -ENOMEM;
4640 }
4641
4642 int bnx2x_reload_if_running(struct net_device *dev)
4643 {
4644         struct bnx2x *bp = netdev_priv(dev);
4645
4646         if (unlikely(!netif_running(dev)))
4647                 return 0;
4648
4649         bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
4650         return bnx2x_nic_load(bp, LOAD_NORMAL);
4651 }
4652
4653 int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4654 {
4655         u32 sel_phy_idx = 0;
4656         if (bp->link_params.num_phys <= 1)
4657                 return INT_PHY;
4658
4659         if (bp->link_vars.link_up) {
4660                 sel_phy_idx = EXT_PHY1;
4661                 /* In case link is SERDES, check if the EXT_PHY2 is the one */
4662                 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4663                     (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4664                         sel_phy_idx = EXT_PHY2;
4665         } else {
4666
4667                 switch (bnx2x_phy_selection(&bp->link_params)) {
4668                 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4669                 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4670                 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4671                        sel_phy_idx = EXT_PHY1;
4672                        break;
4673                 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4674                 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4675                        sel_phy_idx = EXT_PHY2;
4676                        break;
4677                 }
4678         }
4679
4680         return sel_phy_idx;
4681 }
4682 int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4683 {
4684         u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4685         /*
4686          * The selected active PHY index is always the one after swapping (when
4687          * PHY swapping is enabled), so in that case we need to reverse the
4688          * index to get the right configuration entry.
4689          */
4690
4691         if (bp->link_params.multi_phy_config &
4692             PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4693                 if (sel_phy_idx == EXT_PHY1)
4694                         sel_phy_idx = EXT_PHY2;
4695                 else if (sel_phy_idx == EXT_PHY2)
4696                         sel_phy_idx = EXT_PHY1;
4697         }
4698         return LINK_CONFIG_IDX(sel_phy_idx);
4699 }
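/* Example: with PHY swapping enabled, if bnx2x_get_cur_phy_idx() reports
 * EXT_PHY2 as the active PHY, the function above returns the configuration
 * index of EXT_PHY1, i.e. the entry that described this PHY before the
 * swap (and vice versa).
 */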
4700
4701 #ifdef NETDEV_FCOE_WWNN
4702 int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4703 {
4704         struct bnx2x *bp = netdev_priv(dev);
4705         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4706
4707         switch (type) {
4708         case NETDEV_FCOE_WWNN:
4709                 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4710                                 cp->fcoe_wwn_node_name_lo);
4711                 break;
4712         case NETDEV_FCOE_WWPN:
4713                 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4714                                 cp->fcoe_wwn_port_name_lo);
4715                 break;
4716         default:
4717                 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
4718                 return -EINVAL;
4719         }
4720
4721         return 0;
4722 }
4723 #endif
4724
4725 /* called with rtnl_lock */
4726 int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4727 {
4728         struct bnx2x *bp = netdev_priv(dev);
4729
4730         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4731                 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
4732                 return -EAGAIN;
4733         }
4734
4735         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
4736             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
4737                 BNX2X_ERR("Can't support requested MTU size\n");
4738                 return -EINVAL;
4739         }
4740
4741         /* This does not race with packet allocation
4742          * because the actual alloc size is
4743          * only updated as part of load
4744          */
4745         dev->mtu = new_mtu;
4746
4747         return bnx2x_reload_if_running(dev);
4748 }
4749
4750 netdev_features_t bnx2x_fix_features(struct net_device *dev,
4751                                      netdev_features_t features)
4752 {
4753         struct bnx2x *bp = netdev_priv(dev);
4754
4755         /* TPA requires Rx CSUM offloading */
4756         if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
4757                 features &= ~NETIF_F_LRO;
4758                 features &= ~NETIF_F_GRO;
4759         }
4760
4761         return features;
4762 }
4763
4764 int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
4765 {
4766         struct bnx2x *bp = netdev_priv(dev);
4767         u32 flags = bp->flags;
4768         u32 changes;
4769         bool bnx2x_reload = false;
4770
4771         if (features & NETIF_F_LRO)
4772                 flags |= TPA_ENABLE_FLAG;
4773         else
4774                 flags &= ~TPA_ENABLE_FLAG;
4775
4776         if (features & NETIF_F_GRO)
4777                 flags |= GRO_ENABLE_FLAG;
4778         else
4779                 flags &= ~GRO_ENABLE_FLAG;
4780
4781         if (features & NETIF_F_LOOPBACK) {
4782                 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4783                         bp->link_params.loopback_mode = LOOPBACK_BMAC;
4784                         bnx2x_reload = true;
4785                 }
4786         } else {
4787                 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4788                         bp->link_params.loopback_mode = LOOPBACK_NONE;
4789                         bnx2x_reload = true;
4790                 }
4791         }
4792
4793         changes = flags ^ bp->flags;
4794
4795         /* if GRO is changed while LRO is enabled, don't force a reload */
4796         if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG))
4797                 changes &= ~GRO_ENABLE_FLAG;
4798
4799         if (changes)
4800                 bnx2x_reload = true;
4801
4802         bp->flags = flags;
4803
4804         if (bnx2x_reload) {
4805                 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
4806                         return bnx2x_reload_if_running(dev);
4807                 /* else: bnx2x_nic_load() will be called at end of recovery */
4808         }
4809
4810         return 0;
4811 }
4812
4813 void bnx2x_tx_timeout(struct net_device *dev)
4814 {
4815         struct bnx2x *bp = netdev_priv(dev);
4816
4817 #ifdef BNX2X_STOP_ON_ERROR
4818         if (!bp->panic)
4819                 bnx2x_panic();
4820 #endif
4821
4822         /* This allows the netif to be shut down gracefully before resetting */
4823         bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
4824 }
4825
4826 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
4827 {
4828         struct net_device *dev = pci_get_drvdata(pdev);
4829         struct bnx2x *bp;
4830
4831         if (!dev) {
4832                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4833                 return -ENODEV;
4834         }
4835         bp = netdev_priv(dev);
4836
4837         rtnl_lock();
4838
4839         pci_save_state(pdev);
4840
4841         if (!netif_running(dev)) {
4842                 rtnl_unlock();
4843                 return 0;
4844         }
4845
4846         netif_device_detach(dev);
4847
4848         bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
4849
4850         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
4851
4852         rtnl_unlock();
4853
4854         return 0;
4855 }
4856
4857 int bnx2x_resume(struct pci_dev *pdev)
4858 {
4859         struct net_device *dev = pci_get_drvdata(pdev);
4860         struct bnx2x *bp;
4861         int rc;
4862
4863         if (!dev) {
4864                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4865                 return -ENODEV;
4866         }
4867         bp = netdev_priv(dev);
4868
4869         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4870                 BNX2X_ERR("Handling parity error recovery. Try again later\n");
4871                 return -EAGAIN;
4872         }
4873
4874         rtnl_lock();
4875
4876         pci_restore_state(pdev);
4877
4878         if (!netif_running(dev)) {
4879                 rtnl_unlock();
4880                 return 0;
4881         }
4882
4883         bnx2x_set_power_state(bp, PCI_D0);
4884         netif_device_attach(dev);
4885
4886         rc = bnx2x_nic_load(bp, LOAD_OPEN);
4887
4888         rtnl_unlock();
4889
4890         return rc;
4891 }
4892
4893 void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
4894                               u32 cid)
4895 {
4896         if (!cxt) {
4897                 BNX2X_ERR("bad context pointer %p\n", cxt);
4898                 return;
4899         }
4900
4901         /* ustorm cxt validation */
4902         cxt->ustorm_ag_context.cdu_usage =
4903                 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4904                         CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
4905         /* xstorm cxt validation */
4906         cxt->xstorm_ag_context.cdu_reserved =
4907                 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4908                         CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
4909 }
4910
4911 static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
4912                                     u8 fw_sb_id, u8 sb_index,
4913                                     u8 ticks)
4914 {
4915         u32 addr = BAR_CSTRORM_INTMEM +
4916                    CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
4917         REG_WR8(bp, addr, ticks);
4918         DP(NETIF_MSG_IFUP,
4919            "port %x fw_sb_id %d sb_index %d ticks %d\n",
4920            port, fw_sb_id, sb_index, ticks);
4921 }
4922
4923 static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
4924                                     u16 fw_sb_id, u8 sb_index,
4925                                     u8 disable)
4926 {
4927         u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
4928         u32 addr = BAR_CSTRORM_INTMEM +
4929                    CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
4930         u8 flags = REG_RD8(bp, addr);
4931         /* clear and set */
4932         flags &= ~HC_INDEX_DATA_HC_ENABLED;
4933         flags |= enable_flag;
4934         REG_WR8(bp, addr, flags);
4935         DP(NETIF_MSG_IFUP,
4936            "port %x fw_sb_id %d sb_index %d disable %d\n",
4937            port, fw_sb_id, sb_index, disable);
4938 }
4939
4940 void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
4941                                     u8 sb_index, u8 disable, u16 usec)
4942 {
4943         int port = BP_PORT(bp);
4944         u8 ticks = usec / BNX2X_BTR;
4945
4946         storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4947
4948         disable = disable ? 1 : (usec ? 0 : 1);
4949         storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4950 }
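/* Note on bnx2x_update_coalesce_sb_index(): the timeout is programmed in
 * BNX2X_BTR units (usec / BNX2X_BTR ticks), and passing usec == 0 forces
 * the index to be disabled even if the caller did not ask for it,
 * presumably because a zero coalescing timeout is not usable.
 */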
4951
4952 void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
4953                             u32 verbose)
4954 {
4955         smp_mb__before_atomic();
4956         set_bit(flag, &bp->sp_rtnl_state);
4957         smp_mb__after_atomic();
4958         DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
4959            flag);
4960         schedule_delayed_work(&bp->sp_rtnl_task, 0);
4961 }
4962 EXPORT_SYMBOL(bnx2x_schedule_sp_rtnl);