1 /* bnx2x_cmn.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2007-2012 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10  * Written by: Eliezer Tamir
11  * Based on code from Michael Chan's bnx2 driver
12  * UDP CSUM errata workaround by Arik Gendelman
13  * Slowpath and fastpath rework by Vladislav Zolotarov
14  * Statistics and Link management by Yitchak Gertner
15  *
16  */
17
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
20 #include <linux/etherdevice.h>
21 #include <linux/if_vlan.h>
22 #include <linux/interrupt.h>
23 #include <linux/ip.h>
24 #include <net/ipv6.h>
25 #include <net/ip6_checksum.h>
26 #include <linux/prefetch.h>
27 #include "bnx2x_cmn.h"
28 #include "bnx2x_init.h"
29 #include "bnx2x_sp.h"
30
31
32
33 /**
34  * bnx2x_move_fp - move content of the fastpath structure.
35  *
36  * @bp:         driver handle
37  * @from:       source FP index
38  * @to:         destination FP index
39  *
40  * Makes sure the content of bp->fp[to].napi is kept
41  * intact. This is done by first copying the napi struct from
42  * the target to the source, and then memcpy'ing the entire
43  * source onto the target. Updates txdata pointers and related
44  * content.
45  */
46 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
47 {
48         struct bnx2x_fastpath *from_fp = &bp->fp[from];
49         struct bnx2x_fastpath *to_fp = &bp->fp[to];
50         struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
51         struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
52         struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
53         struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
54         int old_max_eth_txqs, new_max_eth_txqs;
55         int old_txdata_index = 0, new_txdata_index = 0;
56
57         /* Copy the NAPI object as it has been already initialized */
58         from_fp->napi = to_fp->napi;
59
60         /* Move bnx2x_fastpath contents */
61         memcpy(to_fp, from_fp, sizeof(*to_fp));
62         to_fp->index = to;
63
64         /* move sp_objs contents as well, as their indices match fp ones */
65         memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
66
67         /* move fp_stats contents as well, as their indices match fp ones */
68         memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
69
70         /* Update txdata pointers in fp and move txdata content accordingly:
71          * Each fp consumes 'max_cos' txdata structures, so the index should be
72          * decremented by max_cos x delta.
73          */
74
75         old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
76         new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
77                                 (bp)->max_cos;
78         if (from == FCOE_IDX(bp)) {
79                 old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
80                 new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
81         }
82
83         memcpy(&bp->bnx2x_txq[new_txdata_index],
84                &bp->bnx2x_txq[old_txdata_index],
85                sizeof(struct bnx2x_fp_txdata));
86         to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
87 }
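/* Worked example (hypothetical numbers, for illustration only): with
 * BNX2X_NUM_ETH_QUEUES(bp) == 8, max_cos == 3, from == 8 (the FCoE index)
 * and to == 6, the formulas above give
 *   old_txdata_index = 8 * 3 + FCOE_TXQ_IDX_OFFSET
 *   new_txdata_index = (8 - 8 + 6) * 3 + FCOE_TXQ_IDX_OFFSET
 * so the FCoE txdata entry is copied backward by max_cos * (from - to)
 * slots, as described in the comment above.
 */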
88
89 /**
90  * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
91  *
92  * @bp: driver handle
93  * @delta:      number of eth queues which were not allocated
94  */
95 static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
96 {
97         int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
98
99         /* Queue pointers cannot be re-set on a per-fp basis, as moving a pointer
100          * backward along the array could cause memory to be overridden
101          */
102         for (cos = 1; cos < bp->max_cos; cos++) {
103                 for (i = 0; i < old_eth_num - delta; i++) {
104                         struct bnx2x_fastpath *fp = &bp->fp[i];
105                         int new_idx = cos * (old_eth_num - delta) + i;
106
107                         memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
108                                sizeof(struct bnx2x_fp_txdata));
109                         fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
110                 }
111         }
112 }
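/* Worked example (hypothetical numbers, for illustration only): if 8 ETH
 * queues were requested but delta == 2 of them were not allocated, the
 * txdata entries for cos 1 of queues 0..5 are compacted to
 *   new_idx = 1 * (8 - 2) + i = 6 + i
 * The cos 0 entries already sit at indices 0..5 and need no move, which
 * is why the outer loop starts at cos == 1.
 */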
113
114 int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
115
116 /* free skb in the packet ring at pos idx
117  * return idx of last bd freed
118  */
119 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
120                              u16 idx, unsigned int *pkts_compl,
121                              unsigned int *bytes_compl)
122 {
123         struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
124         struct eth_tx_start_bd *tx_start_bd;
125         struct eth_tx_bd *tx_data_bd;
126         struct sk_buff *skb = tx_buf->skb;
127         u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
128         int nbd;
129
130         /* prefetch skb end pointer to speedup dev_kfree_skb() */
131         prefetch(&skb->end);
132
133         DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d  buff @(%p)->skb %p\n",
134            txdata->txq_index, idx, tx_buf, skb);
135
136         /* unmap first bd */
137         tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
138         dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
139                          BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
140
141
142         nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
143 #ifdef BNX2X_STOP_ON_ERROR
144         if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
145                 BNX2X_ERR("BAD nbd!\n");
146                 bnx2x_panic();
147         }
148 #endif
149         new_cons = nbd + tx_buf->first_bd;
150
151         /* Get the next bd */
152         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
153
154         /* Skip a parse bd... */
155         --nbd;
156         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
157
158         /* ...and the TSO split header bd since they have no mapping */
159         if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
160                 --nbd;
161                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
162         }
163
164         /* now free frags */
165         while (nbd > 0) {
166
167                 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
168                 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
169                                BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
170                 if (--nbd)
171                         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
172         }
173
174         /* release skb */
175         WARN_ON(!skb);
176         if (likely(skb)) {
177                 (*pkts_compl)++;
178                 (*bytes_compl) += skb->len;
179         }
180
181         dev_kfree_skb_any(skb);
182         tx_buf->first_bd = 0;
183         tx_buf->skb = NULL;
184
185         return new_cons;
186 }
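/* Summary of the walk above, for clarity: a transmitted packet occupies a
 * chain of BDs - a start BD, a parse BD, an optional TSO split header BD
 * and one data BD per fragment.  Only the start BD and the data BDs carry
 * DMA mappings, which is why the parse BD and the TSO split header BD are
 * skipped without dma_unmap_*() calls.
 */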
187
188 int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
189 {
190         struct netdev_queue *txq;
191         u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
192         unsigned int pkts_compl = 0, bytes_compl = 0;
193
194 #ifdef BNX2X_STOP_ON_ERROR
195         if (unlikely(bp->panic))
196                 return -1;
197 #endif
198
199         txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
200         hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
201         sw_cons = txdata->tx_pkt_cons;
202
203         while (sw_cons != hw_cons) {
204                 u16 pkt_cons;
205
206                 pkt_cons = TX_BD(sw_cons);
207
208                 DP(NETIF_MSG_TX_DONE,
209                    "queue[%d]: hw_cons %u  sw_cons %u  pkt_cons %u\n",
210                    txdata->txq_index, hw_cons, sw_cons, pkt_cons);
211
212                 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
213                     &pkts_compl, &bytes_compl);
214
215                 sw_cons++;
216         }
217
218         netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
219
220         txdata->tx_pkt_cons = sw_cons;
221         txdata->tx_bd_cons = bd_cons;
222
223         /* Need to make the tx_bd_cons update visible to start_xmit()
224          * before checking for netif_tx_queue_stopped().  Without the
225          * memory barrier, there is a small possibility that
226          * start_xmit() will miss it and cause the queue to be stopped
227          * forever.
228          * On the other hand we need an rmb() here to ensure the proper
229          * ordering of bit testing in the following
230          * netif_tx_queue_stopped(txq) call.
231          */
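        /* Illustrative interleaving (hypothetical, for clarity): CPU0 in
         * bnx2x_start_xmit() fills the last free BDs and stops the queue;
         * CPU1 here frees those BDs and updates tx_bd_cons.  Without this
         * barrier CPU1 could see the queue as not yet stopped while CPU0
         * still sees a stale tx_bd_cons, and neither side would ever wake
         * the queue again.
         */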
232         smp_mb();
233
234         if (unlikely(netif_tx_queue_stopped(txq))) {
235                 /* Taking tx_lock() is needed to prevent re-enabling the queue
236                  * while it's empty. This could happen if rx_action() gets
237                  * suspended in bnx2x_tx_int() after the condition before
238                  * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
239                  *
240                  * stops the queue->sees fresh tx_bd_cons->releases the queue->
241                  * sends some packets consuming the whole queue again->
242                  * stops the queue
243                  */
244
245                 __netif_tx_lock(txq, smp_processor_id());
246
247                 if ((netif_tx_queue_stopped(txq)) &&
248                     (bp->state == BNX2X_STATE_OPEN) &&
249                     (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
250                         netif_tx_wake_queue(txq);
251
252                 __netif_tx_unlock(txq);
253         }
254         return 0;
255 }
256
257 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
258                                              u16 idx)
259 {
260         u16 last_max = fp->last_max_sge;
261
262         if (SUB_S16(idx, last_max) > 0)
263                 fp->last_max_sge = idx;
264 }
265
266 static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
267                                          u16 sge_len,
268                                          struct eth_end_agg_rx_cqe *cqe)
269 {
270         struct bnx2x *bp = fp->bp;
271         u16 last_max, last_elem, first_elem;
272         u16 delta = 0;
273         u16 i;
274
275         if (!sge_len)
276                 return;
277
278         /* First mark all used pages */
279         for (i = 0; i < sge_len; i++)
280                 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
281                         RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
282
283         DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
284            sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
285
286         /* Here we assume that the last SGE index is the biggest */
287         prefetch((void *)(fp->sge_mask));
288         bnx2x_update_last_max_sge(fp,
289                 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
290
291         last_max = RX_SGE(fp->last_max_sge);
292         last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
293         first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
294
295         /* If ring is not full */
296         if (last_elem + 1 != first_elem)
297                 last_elem++;
298
299         /* Now update the prod */
300         for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
301                 if (likely(fp->sge_mask[i]))
302                         break;
303
304                 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
305                 delta += BIT_VEC64_ELEM_SZ;
306         }
307
308         if (delta > 0) {
309                 fp->rx_sge_prod += delta;
310                 /* clear page-end entries */
311                 bnx2x_clear_sge_mask_next_elems(fp);
312         }
313
314         DP(NETIF_MSG_RX_STATUS,
315            "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
316            fp->last_max_sge, fp->rx_sge_prod);
317 }
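/* Note, for clarity: fp->sge_mask is a bit vector with one bit per SGE
 * ring entry, grouped into BIT_VEC64_ELEM_SZ-bit elements.  Bits are
 * cleared above for the pages consumed by the aggregation (as reported
 * in the CQE), and the SGE producer is only advanced over elements that
 * have become fully clear.  E.g. (hypothetical numbers): if the two
 * elements starting at first_elem are fully clear but the third is not,
 * the loop re-arms those two elements and advances rx_sge_prod by
 * 2 * BIT_VEC64_ELEM_SZ entries.
 */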
318
319 /* Set Toeplitz hash value in the skb using the value from the
320  * CQE (calculated by HW).
321  */
322 static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
323                             const struct eth_fast_path_rx_cqe *cqe,
324                             bool *l4_rxhash)
325 {
326         /* Set Toeplitz hash from CQE */
327         if ((bp->dev->features & NETIF_F_RXHASH) &&
328             (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
329                 enum eth_rss_hash_type htype;
330
331                 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
332                 *l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) ||
333                              (htype == TCP_IPV6_HASH_TYPE);
334                 return le32_to_cpu(cqe->rss_hash_result);
335         }
336         *l4_rxhash = false;
337         return 0;
338 }
339
340 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
341                             u16 cons, u16 prod,
342                             struct eth_fast_path_rx_cqe *cqe)
343 {
344         struct bnx2x *bp = fp->bp;
345         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
346         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
347         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
348         dma_addr_t mapping;
349         struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
350         struct sw_rx_bd *first_buf = &tpa_info->first_buf;
351
352         /* print error if current state != stop */
353         if (tpa_info->tpa_state != BNX2X_TPA_STOP)
354                 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
355
356         /* Try to map an empty data buffer from the aggregation info  */
357         mapping = dma_map_single(&bp->pdev->dev,
358                                  first_buf->data + NET_SKB_PAD,
359                                  fp->rx_buf_size, DMA_FROM_DEVICE);
360         /*
361          *  ...if it fails - move the skb from the consumer to the producer
362          *  and set the current aggregation state as ERROR to drop it
363          *  when TPA_STOP arrives.
364          */
365
366         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
367                 /* Move the BD from the consumer to the producer */
368                 bnx2x_reuse_rx_data(fp, cons, prod);
369                 tpa_info->tpa_state = BNX2X_TPA_ERROR;
370                 return;
371         }
372
373         /* move empty data from pool to prod */
374         prod_rx_buf->data = first_buf->data;
375         dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
376         /* point prod_bd to new data */
377         prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
378         prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
379
380         /* move partial skb from cons to pool (don't unmap yet) */
381         *first_buf = *cons_rx_buf;
382
383         /* mark bin state as START */
384         tpa_info->parsing_flags =
385                 le16_to_cpu(cqe->pars_flags.flags);
386         tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
387         tpa_info->tpa_state = BNX2X_TPA_START;
388         tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
389         tpa_info->placement_offset = cqe->placement_offset;
390         tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
391         if (fp->mode == TPA_MODE_GRO) {
392                 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
393                 tpa_info->full_page =
394                         SGE_PAGE_SIZE * PAGES_PER_SGE / gro_size * gro_size;
395                 tpa_info->gro_size = gro_size;
396         }
397
398 #ifdef BNX2X_STOP_ON_ERROR
399         fp->tpa_queue_used |= (1 << queue);
400 #ifdef _ASM_GENERIC_INT_L64_H
401         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
402 #else
403         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
404 #endif
405            fp->tpa_queue_used);
406 #endif
407 }
408
409 /* Timestamp option length allowed for TPA aggregation:
410  *
411  *              nop nop kind length echo val
412  */
413 #define TPA_TSTAMP_OPT_LEN      12
414 /**
415  * bnx2x_set_lro_mss - calculate the approximate value of the MSS
416  *
417  * @bp:                 driver handle
418  * @parsing_flags:      parsing flags from the START CQE
419  * @len_on_bd:          total length of the first packet for the
420  *                      aggregation.
421  *
422  * Approximate value of the MSS for this aggregation, calculated using
423  * its first packet.
424  */
425 static u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
426                              u16 len_on_bd)
427 {
428         /*
429          * TPA aggregation won't have either IP options or TCP options
430          * other than timestamp or IPv6 extension headers.
431          */
432         u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
433
434         if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
435             PRS_FLAG_OVERETH_IPV6)
436                 hdrs_len += sizeof(struct ipv6hdr);
437         else /* IPv4 */
438                 hdrs_len += sizeof(struct iphdr);
439
440
441         /* Check if there was a TCP timestamp; if there is, it will
442          * always be 12 bytes long: nop nop kind length echo val.
443          *
444          * Otherwise FW would close the aggregation.
445          */
446         if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
447                 hdrs_len += TPA_TSTAMP_OPT_LEN;
448
449         return len_on_bd - hdrs_len;
450 }
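/* Worked example (standard header sizes, hypothetical packet length, for
 * illustration only): for an IPv4 aggregation whose first packet has
 * len_on_bd == 1514 and carries TCP timestamps,
 *   hdrs_len = 14 (ETH_HLEN) + 20 (iphdr) + 20 (tcphdr) + 12 (tstamp) = 66
 * so the reported MSS is 1514 - 66 = 1448.  Without timestamps it would
 * be 1514 - 54 = 1460.
 */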
451
452 static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
453                               struct bnx2x_fastpath *fp, u16 index)
454 {
455         struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
456         struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
457         struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
458         dma_addr_t mapping;
459
460         if (unlikely(page == NULL)) {
461                 BNX2X_ERR("Can't alloc sge\n");
462                 return -ENOMEM;
463         }
464
465         mapping = dma_map_page(&bp->pdev->dev, page, 0,
466                                SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
467         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
468                 __free_pages(page, PAGES_PER_SGE_SHIFT);
469                 BNX2X_ERR("Can't map sge\n");
470                 return -ENOMEM;
471         }
472
473         sw_buf->page = page;
474         dma_unmap_addr_set(sw_buf, mapping, mapping);
475
476         sge->addr_hi = cpu_to_le32(U64_HI(mapping));
477         sge->addr_lo = cpu_to_le32(U64_LO(mapping));
478
479         return 0;
480 }
481
482 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
483                                struct bnx2x_agg_info *tpa_info,
484                                u16 pages,
485                                struct sk_buff *skb,
486                                struct eth_end_agg_rx_cqe *cqe,
487                                u16 cqe_idx)
488 {
489         struct sw_rx_page *rx_pg, old_rx_pg;
490         u32 i, frag_len, frag_size;
491         int err, j, frag_id = 0;
492         u16 len_on_bd = tpa_info->len_on_bd;
493         u16 full_page = 0, gro_size = 0;
494
495         frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
496
497         if (fp->mode == TPA_MODE_GRO) {
498                 gro_size = tpa_info->gro_size;
499                 full_page = tpa_info->full_page;
500         }
501
502         /* This is needed in order to enable forwarding support */
503         if (frag_size) {
504                 skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
505                                         tpa_info->parsing_flags, len_on_bd);
506
507                 /* set for GRO */
508                 if (fp->mode == TPA_MODE_GRO)
509                         skb_shinfo(skb)->gso_type =
510                             (GET_FLAG(tpa_info->parsing_flags,
511                                       PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
512                                                 PRS_FLAG_OVERETH_IPV6) ?
513                                 SKB_GSO_TCPV6 : SKB_GSO_TCPV4;
514         }
515
516
517 #ifdef BNX2X_STOP_ON_ERROR
518         if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
519                 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
520                           pages, cqe_idx);
521                 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
522                 bnx2x_panic();
523                 return -EINVAL;
524         }
525 #endif
526
527         /* Run through the SGL and compose the fragmented skb */
528         for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
529                 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
530
531                 /* FW gives the indices of the SGE as if the ring is an array
532                    (meaning that "next" element will consume 2 indices) */
533                 if (fp->mode == TPA_MODE_GRO)
534                         frag_len = min_t(u32, frag_size, (u32)full_page);
535                 else /* LRO */
536                         frag_len = min_t(u32, frag_size,
537                                          (u32)(SGE_PAGE_SIZE * PAGES_PER_SGE));
538
539                 rx_pg = &fp->rx_page_ring[sge_idx];
540                 old_rx_pg = *rx_pg;
541
542                 /* If we fail to allocate a substitute page, we simply stop
543                    where we are and drop the whole packet */
544                 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
545                 if (unlikely(err)) {
546                         bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
547                         return err;
548                 }
549
550                 /* Unmap the page as we are going to pass it to the stack */
551                 dma_unmap_page(&bp->pdev->dev,
552                                dma_unmap_addr(&old_rx_pg, mapping),
553                                SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
554                 /* Add one frag and update the appropriate fields in the skb */
555                 if (fp->mode == TPA_MODE_LRO)
556                         skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
557                 else { /* GRO */
558                         int rem;
559                         int offset = 0;
560                         for (rem = frag_len; rem > 0; rem -= gro_size) {
561                                 int len = rem > gro_size ? gro_size : rem;
562                                 skb_fill_page_desc(skb, frag_id++,
563                                                    old_rx_pg.page, offset, len);
564                                 if (offset)
565                                         get_page(old_rx_pg.page);
566                                 offset += len;
567                         }
568                 }
569
570                 skb->data_len += frag_len;
571                 skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE;
572                 skb->len += frag_len;
573
574                 frag_size -= frag_len;
575         }
576
577         return 0;
578 }
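/* Note on the GRO path above (hypothetical numbers, for illustration
 * only): full_page is the SGE chunk size rounded down to a multiple of
 * gro_size, e.g. with a 4096-byte SGE chunk and gro_size == 1448 it is
 * 2 * 1448 = 2896.  Each SGE is therefore split into gro_size-sized skb
 * frags that all reference the same page, hence the extra get_page()
 * for every frag after the first.
 */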
579
580 static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
581 {
582         if (fp->rx_frag_size)
583                 put_page(virt_to_head_page(data));
584         else
585                 kfree(data);
586 }
587
588 static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp)
589 {
590         if (fp->rx_frag_size)
591                 return netdev_alloc_frag(fp->rx_frag_size);
592
593         return kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
594 }
595
596
597 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
598                            struct bnx2x_agg_info *tpa_info,
599                            u16 pages,
600                            struct eth_end_agg_rx_cqe *cqe,
601                            u16 cqe_idx)
602 {
603         struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
604         u8 pad = tpa_info->placement_offset;
605         u16 len = tpa_info->len_on_bd;
606         struct sk_buff *skb = NULL;
607         u8 *new_data, *data = rx_buf->data;
608         u8 old_tpa_state = tpa_info->tpa_state;
609
610         tpa_info->tpa_state = BNX2X_TPA_STOP;
611
612         /* If there was an error during the handling of the TPA_START -
613          * drop this aggregation.
614          */
615         if (old_tpa_state == BNX2X_TPA_ERROR)
616                 goto drop;
617
618         /* Try to allocate the new data */
619         new_data = bnx2x_frag_alloc(fp);
620         /* Unmap skb in the pool anyway, as we are going to change
621            pool entry status to BNX2X_TPA_STOP even if new skb allocation
622            fails. */
623         dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
624                          fp->rx_buf_size, DMA_FROM_DEVICE);
625         if (likely(new_data))
626                 skb = build_skb(data, fp->rx_frag_size);
627
628         if (likely(skb)) {
629 #ifdef BNX2X_STOP_ON_ERROR
630                 if (pad + len > fp->rx_buf_size) {
631                         BNX2X_ERR("skb_put is about to fail...  pad %d  len %d  rx_buf_size %d\n",
632                                   pad, len, fp->rx_buf_size);
633                         bnx2x_panic();
634                         return;
635                 }
636 #endif
637
638                 skb_reserve(skb, pad + NET_SKB_PAD);
639                 skb_put(skb, len);
640                 skb->rxhash = tpa_info->rxhash;
641                 skb->l4_rxhash = tpa_info->l4_rxhash;
642
643                 skb->protocol = eth_type_trans(skb, bp->dev);
644                 skb->ip_summed = CHECKSUM_UNNECESSARY;
645
646                 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
647                                          skb, cqe, cqe_idx)) {
648                         if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
649                                 __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
650                         napi_gro_receive(&fp->napi, skb);
651                 } else {
652                         DP(NETIF_MSG_RX_STATUS,
653                            "Failed to allocate new pages - dropping packet!\n");
654                         dev_kfree_skb_any(skb);
655                 }
656
657
658                 /* put new data in bin */
659                 rx_buf->data = new_data;
660
661                 return;
662         }
663         bnx2x_frag_free(fp, new_data);
664 drop:
665         /* drop the packet and keep the buffer in the bin */
666         DP(NETIF_MSG_RX_STATUS,
667            "Failed to allocate or map a new skb - dropping packet!\n");
668         bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
669 }
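/* Note, for clarity: the per-queue TPA state machine seen above is
 * STOP -> START on a TPA start CQE (bnx2x_tpa_start), or STOP -> ERROR
 * if mapping a replacement buffer fails at start time.  Either way the
 * state returns to STOP here, and an aggregation that went through
 * ERROR is dropped on the path above.
 */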
670
671 static int bnx2x_alloc_rx_data(struct bnx2x *bp,
672                                struct bnx2x_fastpath *fp, u16 index)
673 {
674         u8 *data;
675         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
676         struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
677         dma_addr_t mapping;
678
679         data = bnx2x_frag_alloc(fp);
680         if (unlikely(data == NULL))
681                 return -ENOMEM;
682
683         mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
684                                  fp->rx_buf_size,
685                                  DMA_FROM_DEVICE);
686         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
687                 bnx2x_frag_free(fp, data);
688                 BNX2X_ERR("Can't map rx data\n");
689                 return -ENOMEM;
690         }
691
692         rx_buf->data = data;
693         dma_unmap_addr_set(rx_buf, mapping, mapping);
694
695         rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
696         rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
697
698         return 0;
699 }
700
701 static
702 void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
703                                  struct bnx2x_fastpath *fp,
704                                  struct bnx2x_eth_q_stats *qstats)
705 {
706         /* Do nothing if no L4 csum validation was done.
707          * We do not check whether IP csum was validated. For IPv4 we assume
708          * that if the card got as far as validating the L4 csum, it also
709          * validated the IP csum. IPv6 has no IP csum.
710          */
711         if (cqe->fast_path_cqe.status_flags &
712             ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
713                 return;
714
715         /* If L4 validation was done, check if an error was found. */
716
717         if (cqe->fast_path_cqe.type_error_flags &
718             (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
719              ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
720                 qstats->hw_csum_err++;
721         else
722                 skb->ip_summed = CHECKSUM_UNNECESSARY;
723 }
724
725 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
726 {
727         struct bnx2x *bp = fp->bp;
728         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
729         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
730         int rx_pkt = 0;
731
732 #ifdef BNX2X_STOP_ON_ERROR
733         if (unlikely(bp->panic))
734                 return 0;
735 #endif
736
737         /* The CQ "next element" is the same size as a regular element,
738            that's why it's ok here */
739         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
740         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
741                 hw_comp_cons++;
742
743         bd_cons = fp->rx_bd_cons;
744         bd_prod = fp->rx_bd_prod;
745         bd_prod_fw = bd_prod;
746         sw_comp_cons = fp->rx_comp_cons;
747         sw_comp_prod = fp->rx_comp_prod;
748
749         /* Memory barrier necessary as speculative reads of the rx
750          * buffer can be ahead of the index in the status block
751          */
752         rmb();
753
754         DP(NETIF_MSG_RX_STATUS,
755            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
756            fp->index, hw_comp_cons, sw_comp_cons);
757
758         while (sw_comp_cons != hw_comp_cons) {
759                 struct sw_rx_bd *rx_buf = NULL;
760                 struct sk_buff *skb;
761                 union eth_rx_cqe *cqe;
762                 struct eth_fast_path_rx_cqe *cqe_fp;
763                 u8 cqe_fp_flags;
764                 enum eth_rx_cqe_type cqe_fp_type;
765                 u16 len, pad, queue;
766                 u8 *data;
767                 bool l4_rxhash;
768
769 #ifdef BNX2X_STOP_ON_ERROR
770                 if (unlikely(bp->panic))
771                         return 0;
772 #endif
773
774                 comp_ring_cons = RCQ_BD(sw_comp_cons);
775                 bd_prod = RX_BD(bd_prod);
776                 bd_cons = RX_BD(bd_cons);
777
778                 cqe = &fp->rx_comp_ring[comp_ring_cons];
779                 cqe_fp = &cqe->fast_path_cqe;
780                 cqe_fp_flags = cqe_fp->type_error_flags;
781                 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
782
783                 DP(NETIF_MSG_RX_STATUS,
784                    "CQE type %x  err %x  status %x  queue %x  vlan %x  len %u\n",
785                    CQE_TYPE(cqe_fp_flags),
786                    cqe_fp_flags, cqe_fp->status_flags,
787                    le32_to_cpu(cqe_fp->rss_hash_result),
788                    le16_to_cpu(cqe_fp->vlan_tag),
789                    le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
790
791                 /* is this a slowpath msg? */
792                 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
793                         bnx2x_sp_event(fp, cqe);
794                         goto next_cqe;
795                 }
796
797                 rx_buf = &fp->rx_buf_ring[bd_cons];
798                 data = rx_buf->data;
799
800                 if (!CQE_TYPE_FAST(cqe_fp_type)) {
801                         struct bnx2x_agg_info *tpa_info;
802                         u16 frag_size, pages;
803 #ifdef BNX2X_STOP_ON_ERROR
804                         /* sanity check */
805                         if (fp->disable_tpa &&
806                             (CQE_TYPE_START(cqe_fp_type) ||
807                              CQE_TYPE_STOP(cqe_fp_type)))
808                                 BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
809                                           CQE_TYPE(cqe_fp_type));
810 #endif
811
812                         if (CQE_TYPE_START(cqe_fp_type)) {
813                                 u16 queue = cqe_fp->queue_index;
814                                 DP(NETIF_MSG_RX_STATUS,
815                                    "calling tpa_start on queue %d\n",
816                                    queue);
817
818                                 bnx2x_tpa_start(fp, queue,
819                                                 bd_cons, bd_prod,
820                                                 cqe_fp);
821
822                                 goto next_rx;
823
824                         }
825                         queue = cqe->end_agg_cqe.queue_index;
826                         tpa_info = &fp->tpa_info[queue];
827                         DP(NETIF_MSG_RX_STATUS,
828                            "calling tpa_stop on queue %d\n",
829                            queue);
830
831                         frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
832                                     tpa_info->len_on_bd;
833
834                         if (fp->mode == TPA_MODE_GRO)
835                                 pages = (frag_size + tpa_info->full_page - 1) /
836                                          tpa_info->full_page;
837                         else
838                                 pages = SGE_PAGE_ALIGN(frag_size) >>
839                                         SGE_PAGE_SHIFT;
840
841                         bnx2x_tpa_stop(bp, fp, tpa_info, pages,
842                                        &cqe->end_agg_cqe, comp_ring_cons);
843 #ifdef BNX2X_STOP_ON_ERROR
844                         if (bp->panic)
845                                 return 0;
846 #endif
847
848                         bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
849                         goto next_cqe;
850                 }
851                 /* non TPA */
852                 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
853                 pad = cqe_fp->placement_offset;
854                 dma_sync_single_for_cpu(&bp->pdev->dev,
855                                         dma_unmap_addr(rx_buf, mapping),
856                                         pad + RX_COPY_THRESH,
857                                         DMA_FROM_DEVICE);
858                 pad += NET_SKB_PAD;
859                 prefetch(data + pad); /* speedup eth_type_trans() */
860                 /* is this an error packet? */
861                 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
862                         DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
863                            "ERROR  flags %x  rx packet %u\n",
864                            cqe_fp_flags, sw_comp_cons);
865                         bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
866                         goto reuse_rx;
867                 }
868
869                 /* Since we don't have a jumbo ring,
870                  * copy small packets if mtu > 1500
871                  */
872                 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
873                     (len <= RX_COPY_THRESH)) {
874                         skb = netdev_alloc_skb_ip_align(bp->dev, len);
875                         if (skb == NULL) {
876                                 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
877                                    "ERROR  packet dropped because of alloc failure\n");
878                                 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
879                                 goto reuse_rx;
880                         }
881                         memcpy(skb->data, data + pad, len);
882                         bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
883                 } else {
884                         if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
885                                 dma_unmap_single(&bp->pdev->dev,
886                                                  dma_unmap_addr(rx_buf, mapping),
887                                                  fp->rx_buf_size,
888                                                  DMA_FROM_DEVICE);
889                                 skb = build_skb(data, fp->rx_frag_size);
890                                 if (unlikely(!skb)) {
891                                         bnx2x_frag_free(fp, data);
892                                         bnx2x_fp_qstats(bp, fp)->
893                                                         rx_skb_alloc_failed++;
894                                         goto next_rx;
895                                 }
896                                 skb_reserve(skb, pad);
897                         } else {
898                                 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
899                                    "ERROR  packet dropped because of alloc failure\n");
900                                 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
901 reuse_rx:
902                                 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
903                                 goto next_rx;
904                         }
905                 }
906
907                 skb_put(skb, len);
908                 skb->protocol = eth_type_trans(skb, bp->dev);
909
910                 /* Set Toeplitz hash for a non-LRO skb */
911                 skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
912                 skb->l4_rxhash = l4_rxhash;
913
914                 skb_checksum_none_assert(skb);
915
916                 if (bp->dev->features & NETIF_F_RXCSUM)
917                         bnx2x_csum_validate(skb, cqe, fp,
918                                             bnx2x_fp_qstats(bp, fp));
919
920                 skb_record_rx_queue(skb, fp->rx_queue);
921
922                 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
923                     PARSING_FLAGS_VLAN)
924                         __vlan_hwaccel_put_tag(skb,
925                                                le16_to_cpu(cqe_fp->vlan_tag));
926                 napi_gro_receive(&fp->napi, skb);
927
928
929 next_rx:
930                 rx_buf->data = NULL;
931
932                 bd_cons = NEXT_RX_IDX(bd_cons);
933                 bd_prod = NEXT_RX_IDX(bd_prod);
934                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
935                 rx_pkt++;
936 next_cqe:
937                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
938                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
939
940                 if (rx_pkt == budget)
941                         break;
942         } /* while */
943
944         fp->rx_bd_cons = bd_cons;
945         fp->rx_bd_prod = bd_prod_fw;
946         fp->rx_comp_cons = sw_comp_cons;
947         fp->rx_comp_prod = sw_comp_prod;
948
949         /* Update producers */
950         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
951                              fp->rx_sge_prod);
952
953         fp->rx_pkt += rx_pkt;
954         fp->rx_calls++;
955
956         return rx_pkt;
957 }
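/* Summary of the loop above, for clarity: each completion ring entry is
 * either a slowpath event (handed to bnx2x_sp_event()), a TPA start/stop
 * marker, or a regular fast-path packet.  For regular packets on a jumbo
 * MTU, frames up to RX_COPY_THRESH bytes are copied into a fresh skb and
 * the original buffer is reused; larger frames are passed to the stack
 * via build_skb() only after a replacement buffer has been allocated, so
 * the ring never loses a buffer.
 */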
958
959 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
960 {
961         struct bnx2x_fastpath *fp = fp_cookie;
962         struct bnx2x *bp = fp->bp;
963         u8 cos;
964
965         DP(NETIF_MSG_INTR,
966            "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
967            fp->index, fp->fw_sb_id, fp->igu_sb_id);
968         bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
969
970 #ifdef BNX2X_STOP_ON_ERROR
971         if (unlikely(bp->panic))
972                 return IRQ_HANDLED;
973 #endif
974
975         /* Handle Rx and Tx according to MSI-X vector */
976         prefetch(fp->rx_cons_sb);
977
978         for_each_cos_in_tx_queue(fp, cos)
979                 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
980
981         prefetch(&fp->sb_running_index[SM_RX_ID]);
982         napi_schedule(&bnx2x_fp(bp, fp->index, napi));
983
984         return IRQ_HANDLED;
985 }
986
987 /* HW Lock for shared dual port PHYs */
988 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
989 {
990         mutex_lock(&bp->port.phy_mutex);
991
992         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
993 }
994
995 void bnx2x_release_phy_lock(struct bnx2x *bp)
996 {
997         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
998
999         mutex_unlock(&bp->port.phy_mutex);
1000 }
1001
1002 /* calculates MF speed according to current line speed and MF configuration */
1003 u16 bnx2x_get_mf_speed(struct bnx2x *bp)
1004 {
1005         u16 line_speed = bp->link_vars.line_speed;
1006         if (IS_MF(bp)) {
1007                 u16 maxCfg = bnx2x_extract_max_cfg(bp,
1008                                                    bp->mf_config[BP_VN(bp)]);
1009
1010                 /* Calculate the current MAX line speed limit for the MF
1011                  * devices
1012                  */
1013                 if (IS_MF_SI(bp))
1014                         line_speed = (line_speed * maxCfg) / 100;
1015                 else { /* SD mode */
1016                         u16 vn_max_rate = maxCfg * 100;
1017
1018                         if (vn_max_rate < line_speed)
1019                                 line_speed = vn_max_rate;
1020                 }
1021         }
1022
1023         return line_speed;
1024 }
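/* Worked example (hypothetical numbers, for illustration only): with a
 * 10000 Mbps link and maxCfg == 30, switch-independent (SI) mode treats
 * maxCfg as a percentage and reports 10000 * 30 / 100 = 3000 Mbps, while
 * SD mode treats it as a limit in 100 Mbps units and reports
 * min(10000, 30 * 100) = 3000 Mbps.  A maxCfg above the physical line
 * speed leaves the reported speed unchanged in SD mode.
 */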
1025
1026 /**
1027  * bnx2x_fill_report_data - fill the link report data
1028  *
1029  * @bp:         driver handle
1030  * @data:       link state to update
1031  *
1032  * It uses non-atomic bit operations because it is called under the mutex.
1033  */
1034 static void bnx2x_fill_report_data(struct bnx2x *bp,
1035                                    struct bnx2x_link_report_data *data)
1036 {
1037         u16 line_speed = bnx2x_get_mf_speed(bp);
1038
1039         memset(data, 0, sizeof(*data));
1040
1041         /* Fill the report data: effective line speed */
1042         data->line_speed = line_speed;
1043
1044         /* Link is down */
1045         if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1046                 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1047                           &data->link_report_flags);
1048
1049         /* Full DUPLEX */
1050         if (bp->link_vars.duplex == DUPLEX_FULL)
1051                 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
1052
1053         /* Rx Flow Control is ON */
1054         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1055                 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
1056
1057         /* Tx Flow Control is ON */
1058         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1059                 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
1060 }
1061
1062 /**
1063  * bnx2x_link_report - report link status to OS.
1064  *
1065  * @bp:         driver handle
1066  *
1067  * Calls the __bnx2x_link_report() under the same locking scheme
1068  * as the link/PHY state managing code to ensure consistent link
1069  * reporting.
1070  */
1071
1072 void bnx2x_link_report(struct bnx2x *bp)
1073 {
1074         bnx2x_acquire_phy_lock(bp);
1075         __bnx2x_link_report(bp);
1076         bnx2x_release_phy_lock(bp);
1077 }
1078
1079 /**
1080  * __bnx2x_link_report - report link status to OS.
1081  *
1082  * @bp:         driver handle
1083  *
1084  * Non-atomic implementation.
1085  * Should be called under the phy_lock.
1086  */
1087 void __bnx2x_link_report(struct bnx2x *bp)
1088 {
1089         struct bnx2x_link_report_data cur_data;
1090
1091         /* reread mf_cfg */
1092         if (!CHIP_IS_E1(bp))
1093                 bnx2x_read_mf_cfg(bp);
1094
1095         /* Read the current link report info */
1096         bnx2x_fill_report_data(bp, &cur_data);
1097
1098         /* Don't report link down or exactly the same link status twice */
1099         if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1100             (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1101                       &bp->last_reported_link.link_report_flags) &&
1102              test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1103                       &cur_data.link_report_flags)))
1104                 return;
1105
1106         bp->link_cnt++;
1107
1108         /* We are going to report new link parameters now -
1109          * remember the current data for the next time.
1110          */
1111         memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1112
1113         if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1114                      &cur_data.link_report_flags)) {
1115                 netif_carrier_off(bp->dev);
1116                 netdev_err(bp->dev, "NIC Link is Down\n");
1117                 return;
1118         } else {
1119                 const char *duplex;
1120                 const char *flow;
1121
1122                 netif_carrier_on(bp->dev);
1123
1124                 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1125                                        &cur_data.link_report_flags))
1126                         duplex = "full";
1127                 else
1128                         duplex = "half";
1129
1130                 /* Handle the FC at the end so that only these flags can
1131                  * possibly be set. This way we can easily check whether FC
1132                  * is enabled at all.
1133                  */
1134                 if (cur_data.link_report_flags) {
1135                         if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1136                                      &cur_data.link_report_flags)) {
1137                                 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1138                                      &cur_data.link_report_flags))
1139                                         flow = "ON - receive & transmit";
1140                                 else
1141                                         flow = "ON - receive";
1142                         } else {
1143                                 flow = "ON - transmit";
1144                         }
1145                 } else {
1146                         flow = "none";
1147                 }
1148                 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1149                             cur_data.line_speed, duplex, flow);
1150         }
1151 }
1152
1153 static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1154 {
1155         int i;
1156
1157         for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1158                 struct eth_rx_sge *sge;
1159
1160                 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1161                 sge->addr_hi =
1162                         cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1163                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1164
1165                 sge->addr_lo =
1166                         cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1167                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1168         }
1169 }
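/* Note, for clarity: the tail entries of each SGE ring page are reserved
 * as a "next page" pointer rather than as real SGEs; the loop above
 * writes the DMA address of page (i % NUM_RX_SGE_PAGES) into the tail of
 * page i - 1, so the last page points back to the first and the SGE
 * pages form a ring.  This is also why those reserved entries get
 * special treatment in the SGE mask (bnx2x_clear_sge_mask_next_elems()).
 */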
1170
1171 static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1172                                 struct bnx2x_fastpath *fp, int last)
1173 {
1174         int i;
1175
1176         for (i = 0; i < last; i++) {
1177                 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1178                 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1179                 u8 *data = first_buf->data;
1180
1181                 if (data == NULL) {
1182                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1183                         continue;
1184                 }
1185                 if (tpa_info->tpa_state == BNX2X_TPA_START)
1186                         dma_unmap_single(&bp->pdev->dev,
1187                                          dma_unmap_addr(first_buf, mapping),
1188                                          fp->rx_buf_size, DMA_FROM_DEVICE);
1189                 bnx2x_frag_free(fp, data);
1190                 first_buf->data = NULL;
1191         }
1192 }
1193
1194 void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1195 {
1196         int j;
1197
1198         for_each_rx_queue_cnic(bp, j) {
1199                 struct bnx2x_fastpath *fp = &bp->fp[j];
1200
1201                 fp->rx_bd_cons = 0;
1202
1203                 /* Activate BD ring */
1204                 /* Warning!
1205                  * this will generate an interrupt (to the TSTORM);
1206                  * it must only be done after the chip is initialized
1207                  */
1208                 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1209                                      fp->rx_sge_prod);
1210         }
1211 }
1212
1213 void bnx2x_init_rx_rings(struct bnx2x *bp)
1214 {
1215         int func = BP_FUNC(bp);
1216         u16 ring_prod;
1217         int i, j;
1218
1219         /* Allocate TPA resources */
1220         for_each_eth_queue(bp, j) {
1221                 struct bnx2x_fastpath *fp = &bp->fp[j];
1222
1223                 DP(NETIF_MSG_IFUP,
1224                    "mtu %d  rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1225
1226                 if (!fp->disable_tpa) {
1227                         /* Fill the per-aggregation pool */
1228                         for (i = 0; i < MAX_AGG_QS(bp); i++) {
1229                                 struct bnx2x_agg_info *tpa_info =
1230                                         &fp->tpa_info[i];
1231                                 struct sw_rx_bd *first_buf =
1232                                         &tpa_info->first_buf;
1233
1234                                 first_buf->data = bnx2x_frag_alloc(fp);
1235                                 if (!first_buf->data) {
1236                                         BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1237                                                   j);
1238                                         bnx2x_free_tpa_pool(bp, fp, i);
1239                                         fp->disable_tpa = 1;
1240                                         break;
1241                                 }
1242                                 dma_unmap_addr_set(first_buf, mapping, 0);
1243                                 tpa_info->tpa_state = BNX2X_TPA_STOP;
1244                         }
1245
1246                         /* "next page" elements initialization */
1247                         bnx2x_set_next_page_sgl(fp);
1248
1249                         /* set SGEs bit mask */
1250                         bnx2x_init_sge_ring_bit_mask(fp);
1251
1252                         /* Allocate SGEs and initialize the ring elements */
1253                         for (i = 0, ring_prod = 0;
1254                              i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1255
1256                                 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
1257                                         BNX2X_ERR("was only able to allocate %d rx sges\n",
1258                                                   i);
1259                                         BNX2X_ERR("disabling TPA for queue[%d]\n",
1260                                                   j);
1261                                         /* Cleanup already allocated elements */
1262                                         bnx2x_free_rx_sge_range(bp, fp,
1263                                                                 ring_prod);
1264                                         bnx2x_free_tpa_pool(bp, fp,
1265                                                             MAX_AGG_QS(bp));
1266                                         fp->disable_tpa = 1;
1267                                         ring_prod = 0;
1268                                         break;
1269                                 }
1270                                 ring_prod = NEXT_SGE_IDX(ring_prod);
1271                         }
1272
1273                         fp->rx_sge_prod = ring_prod;
1274                 }
1275         }
1276
1277         for_each_eth_queue(bp, j) {
1278                 struct bnx2x_fastpath *fp = &bp->fp[j];
1279
1280                 fp->rx_bd_cons = 0;
1281
1282                 /* Activate BD ring */
1283                 /* Warning!
1284                  * this will generate an interrupt (to the TSTORM);
1285                  * it must only be done after the chip is initialized
1286                  */
1287                 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1288                                      fp->rx_sge_prod);
1289
1290                 if (j != 0)
1291                         continue;
1292
1293                 if (CHIP_IS_E1(bp)) {
1294                         REG_WR(bp, BAR_USTRORM_INTMEM +
1295                                USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1296                                U64_LO(fp->rx_comp_mapping));
1297                         REG_WR(bp, BAR_USTRORM_INTMEM +
1298                                USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1299                                U64_HI(fp->rx_comp_mapping));
1300                 }
1301         }
1302 }
1303
1304 static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1305 {
1306         u8 cos;
1307         struct bnx2x *bp = fp->bp;
1308
1309         for_each_cos_in_tx_queue(fp, cos) {
1310                 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1311                 unsigned pkts_compl = 0, bytes_compl = 0;
1312
1313                 u16 sw_prod = txdata->tx_pkt_prod;
1314                 u16 sw_cons = txdata->tx_pkt_cons;
1315
1316                 while (sw_cons != sw_prod) {
1317                         bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1318                                           &pkts_compl, &bytes_compl);
1319                         sw_cons++;
1320                 }
1321
1322                 netdev_tx_reset_queue(
1323                         netdev_get_tx_queue(bp->dev,
1324                                             txdata->txq_index));
1325         }
1326 }
1327
1328 static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1329 {
1330         int i;
1331
1332         for_each_tx_queue_cnic(bp, i) {
1333                 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1334         }
1335 }
1336
1337 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1338 {
1339         int i;
1340
1341         for_each_eth_queue(bp, i) {
1342                 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1343         }
1344 }
1345
1346 static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1347 {
1348         struct bnx2x *bp = fp->bp;
1349         int i;
1350
1351         /* ring wasn't allocated */
1352         if (fp->rx_buf_ring == NULL)
1353                 return;
1354
1355         for (i = 0; i < NUM_RX_BD; i++) {
1356                 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1357                 u8 *data = rx_buf->data;
1358
1359                 if (data == NULL)
1360                         continue;
1361                 dma_unmap_single(&bp->pdev->dev,
1362                                  dma_unmap_addr(rx_buf, mapping),
1363                                  fp->rx_buf_size, DMA_FROM_DEVICE);
1364
1365                 rx_buf->data = NULL;
1366                 bnx2x_frag_free(fp, data);
1367         }
1368 }
1369
1370 static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1371 {
1372         int j;
1373
1374         for_each_rx_queue_cnic(bp, j) {
1375                 bnx2x_free_rx_bds(&bp->fp[j]);
1376         }
1377 }
1378
1379 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1380 {
1381         int j;
1382
1383         for_each_eth_queue(bp, j) {
1384                 struct bnx2x_fastpath *fp = &bp->fp[j];
1385
1386                 bnx2x_free_rx_bds(fp);
1387
1388                 if (!fp->disable_tpa)
1389                         bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1390         }
1391 }
1392
1393 void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1394 {
1395         bnx2x_free_tx_skbs_cnic(bp);
1396         bnx2x_free_rx_skbs_cnic(bp);
1397 }
1398
1399 void bnx2x_free_skbs(struct bnx2x *bp)
1400 {
1401         bnx2x_free_tx_skbs(bp);
1402         bnx2x_free_rx_skbs(bp);
1403 }
1404
1405 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1406 {
1407         /* load old values */
1408         u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1409
1410         if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1411                 /* leave all but MAX value */
1412                 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1413
1414                 /* set new MAX value */
1415                 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1416                                 & FUNC_MF_CFG_MAX_BW_MASK;
1417
1418                 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1419         }
1420 }
1421
1422 /**
1423  * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1424  *
1425  * @bp:         driver handle
1426  * @nvecs:      number of vectors to be released
1427  */
1428 static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1429 {
1430         int i, offset = 0;
1431
1432         if (nvecs == offset)
1433                 return;
1434         free_irq(bp->msix_table[offset].vector, bp->dev);
1435         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1436            bp->msix_table[offset].vector);
1437         offset++;
1438
1439         if (CNIC_SUPPORT(bp)) {
1440                 if (nvecs == offset)
1441                         return;
1442                 offset++;
1443         }
1444
1445         for_each_eth_queue(bp, i) {
1446                 if (nvecs == offset)
1447                         return;
1448                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1449                    i, bp->msix_table[offset].vector);
1450
1451                 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1452         }
1453 }
1454
1455 void bnx2x_free_irq(struct bnx2x *bp)
1456 {
1457         if (bp->flags & USING_MSIX_FLAG &&
1458             !(bp->flags & USING_SINGLE_MSIX_FLAG))
1459                 bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
1460                                      CNIC_SUPPORT(bp) + 1);
1461         else
1462                 free_irq(bp->dev->irq, bp->dev);
1463 }
1464
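/*
 * Illustrative MSI-X layout requested by bnx2x_enable_msix() (assumed
 * example configuration: CNIC support and 8 ETH queues): entry 0 is the
 * slowpath vector, entry 1 the CNIC vector and entries 2..9 the fastpath
 * vectors, i.e. req_cnt = 8 + 1 + 1 = 10.
 */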
1465 int bnx2x_enable_msix(struct bnx2x *bp)
1466 {
1467         int msix_vec = 0, i, rc, req_cnt;
1468
1469         bp->msix_table[msix_vec].entry = msix_vec;
1470         BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1471            bp->msix_table[0].entry);
1472         msix_vec++;
1473
1474         /* CNIC requires an MSI-X vector for itself */
1475         if (CNIC_SUPPORT(bp)) {
1476                 bp->msix_table[msix_vec].entry = msix_vec;
1477                 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1478                                msix_vec, bp->msix_table[msix_vec].entry);
1479                 msix_vec++;
1480         }
1481
1482         /* We need separate vectors for ETH queues only (not FCoE) */
1483         for_each_eth_queue(bp, i) {
1484                 bp->msix_table[msix_vec].entry = msix_vec;
1485                 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1486                                msix_vec, msix_vec, i);
1487                 msix_vec++;
1488         }
1489
1490         req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp) + 1;
1491
1492         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
1493
1494         /*
1495          * reconfigure number of tx/rx queues according to available
1496          * MSI-X vectors
1497          */
1498         if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) {
1499                 /* how many fewer vectors will we have? */
1500                 int diff = req_cnt - rc;
1501
1502                 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1503
1504                 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1505
1506                 if (rc) {
1507                         BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1508                         goto no_msix;
1509                 }
1510                 /*
1511                  * decrease number of queues by number of unallocated entries
1512                  */
1513                 bp->num_ethernet_queues -= diff;
1514                 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1515
1516                 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1517                                bp->num_queues);
1518         } else if (rc > 0) {
1519                 /* Get by with single vector */
1520                 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
1521                 if (rc) {
1522                         BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1523                                        rc);
1524                         goto no_msix;
1525                 }
1526
1527                 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1528                 bp->flags |= USING_SINGLE_MSIX_FLAG;
1529
1530                 BNX2X_DEV_INFO("set number of queues to 1\n");
1531                 bp->num_ethernet_queues = 1;
1532                 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1533         } else if (rc < 0) {
1534                 BNX2X_DEV_INFO("MSI-X is not attainable  rc %d\n", rc);
1535                 goto no_msix;
1536         }
1537
1538         bp->flags |= USING_MSIX_FLAG;
1539
1540         return 0;
1541
1542 no_msix:
1543         /* fall back to INTx if there is not enough memory */
1544         if (rc == -ENOMEM)
1545                 bp->flags |= DISABLE_MSI_FLAG;
1546
1547         return rc;
1548 }
1549
1550 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1551 {
1552         int i, rc, offset = 0;
1553
1554         rc = request_irq(bp->msix_table[offset++].vector,
1555                          bnx2x_msix_sp_int, 0,
1556                          bp->dev->name, bp->dev);
1557         if (rc) {
1558                 BNX2X_ERR("request sp irq failed\n");
1559                 return -EBUSY;
1560         }
1561
1562         if (CNIC_SUPPORT(bp))
1563                 offset++;
1564
1565         for_each_eth_queue(bp, i) {
1566                 struct bnx2x_fastpath *fp = &bp->fp[i];
1567                 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1568                          bp->dev->name, i);
1569
1570                 rc = request_irq(bp->msix_table[offset].vector,
1571                                  bnx2x_msix_fp_int, 0, fp->name, fp);
1572                 if (rc) {
1573                         BNX2X_ERR("request fp #%d irq (%d) failed  rc %d\n", i,
1574                               bp->msix_table[offset].vector, rc);
1575                         bnx2x_free_msix_irqs(bp, offset);
1576                         return -EBUSY;
1577                 }
1578
1579                 offset++;
1580         }
1581
1582         i = BNX2X_NUM_ETH_QUEUES(bp);
1583         offset = 1 + CNIC_SUPPORT(bp);
1584         netdev_info(bp->dev, "using MSI-X  IRQs: sp %d  fp[%d] %d ... fp[%d] %d\n",
1585                bp->msix_table[0].vector,
1586                0, bp->msix_table[offset].vector,
1587                i - 1, bp->msix_table[offset + i - 1].vector);
1588
1589         return 0;
1590 }
1591
1592 int bnx2x_enable_msi(struct bnx2x *bp)
1593 {
1594         int rc;
1595
1596         rc = pci_enable_msi(bp->pdev);
1597         if (rc) {
1598                 BNX2X_DEV_INFO("MSI is not attainable\n");
1599                 return -1;
1600         }
1601         bp->flags |= USING_MSI_FLAG;
1602
1603         return 0;
1604 }
1605
1606 static int bnx2x_req_irq(struct bnx2x *bp)
1607 {
1608         unsigned long flags;
1609         unsigned int irq;
1610
1611         if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1612                 flags = 0;
1613         else
1614                 flags = IRQF_SHARED;
1615
1616         if (bp->flags & USING_MSIX_FLAG)
1617                 irq = bp->msix_table[0].vector;
1618         else
1619                 irq = bp->pdev->irq;
1620
1621         return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1622 }
1623
1624 static int bnx2x_setup_irqs(struct bnx2x *bp)
1625 {
1626         int rc = 0;
1627         if (bp->flags & USING_MSIX_FLAG &&
1628             !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1629                 rc = bnx2x_req_msix_irqs(bp);
1630                 if (rc)
1631                         return rc;
1632         } else {
1633                 bnx2x_ack_int(bp);
1634                 rc = bnx2x_req_irq(bp);
1635                 if (rc) {
1636                         BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
1637                         return rc;
1638                 }
1639                 if (bp->flags & USING_MSI_FLAG) {
1640                         bp->dev->irq = bp->pdev->irq;
1641                         netdev_info(bp->dev, "using MSI IRQ %d\n",
1642                                     bp->dev->irq);
1643                 }
1644                 if (bp->flags & USING_MSIX_FLAG) {
1645                         bp->dev->irq = bp->msix_table[0].vector;
1646                         netdev_info(bp->dev, "using MSIX IRQ %d\n",
1647                                     bp->dev->irq);
1648                 }
1649         }
1650
1651         return 0;
1652 }
1653
1654 static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1655 {
1656         int i;
1657
1658         for_each_rx_queue_cnic(bp, i)
1659                 napi_enable(&bnx2x_fp(bp, i, napi));
1660 }
1661
1662 static void bnx2x_napi_enable(struct bnx2x *bp)
1663 {
1664         int i;
1665
1666         for_each_eth_queue(bp, i)
1667                 napi_enable(&bnx2x_fp(bp, i, napi));
1668 }
1669
1670 static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1671 {
1672         int i;
1673
1674         for_each_rx_queue_cnic(bp, i)
1675                 napi_disable(&bnx2x_fp(bp, i, napi));
1676 }
1677
1678 static void bnx2x_napi_disable(struct bnx2x *bp)
1679 {
1680         int i;
1681
1682         for_each_eth_queue(bp, i)
1683                 napi_disable(&bnx2x_fp(bp, i, napi));
1684 }
1685
1686 void bnx2x_netif_start(struct bnx2x *bp)
1687 {
1688         if (netif_running(bp->dev)) {
1689                 bnx2x_napi_enable(bp);
1690                 if (CNIC_LOADED(bp))
1691                         bnx2x_napi_enable_cnic(bp);
1692                 bnx2x_int_enable(bp);
1693                 if (bp->state == BNX2X_STATE_OPEN)
1694                         netif_tx_wake_all_queues(bp->dev);
1695         }
1696 }
1697
1698 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1699 {
1700         bnx2x_int_disable_sync(bp, disable_hw);
1701         bnx2x_napi_disable(bp);
1702         if (CNIC_LOADED(bp))
1703                 bnx2x_napi_disable_cnic(bp);
1704 }
1705
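/**
 * bnx2x_select_queue - select the Tx queue for an outgoing skb.
 *
 * @dev:        net device
 * @skb:        packet to be transmitted
 *
 * FCoE and FIP frames (possibly VLAN tagged) are steered to the dedicated
 * FCoE L2 ring when CNIC is loaded; all other traffic is hashed over the
 * regular ETH Tx queues by __skb_tx_hash().
 */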
1706 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1707 {
1708         struct bnx2x *bp = netdev_priv(dev);
1709
1710         if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
1711                 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1712                 u16 ether_type = ntohs(hdr->h_proto);
1713
1714                 /* Skip VLAN tag if present */
1715                 if (ether_type == ETH_P_8021Q) {
1716                         struct vlan_ethhdr *vhdr =
1717                                 (struct vlan_ethhdr *)skb->data;
1718
1719                         ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1720                 }
1721
1722                 /* If ethertype is FCoE or FIP - use FCoE ring */
1723                 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1724                         return bnx2x_fcoe_tx(bp, txq_index);
1725         }
1726
1727         /* select a non-FCoE queue */
1728         return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
1729 }
1730
1731
1732 void bnx2x_set_num_queues(struct bnx2x *bp)
1733 {
1734         /* RSS queues */
1735         bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
1736
1737         /* override in STORAGE SD modes */
1738         if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
1739                 bp->num_ethernet_queues = 1;
1740
1741         /* Add special queues */
1742         bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1743         bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1744
1745         BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
1746 }
1747
1748 /**
1749  * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1750  *
1751  * @bp:         Driver handle
1752  *
1753  * We currently support at most 16 Tx queues for each CoS, thus we will
1754  * allocate a multiple of 16 for ETH L2 rings according to the value of
1755  * bp->max_cos.
1756  *
1757  * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1758  * index after all ETH L2 indices.
1759  *
1760  * If the actual number of Tx queues (for each CoS) is less than 16 then there
1761  * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1762  * 16..31, ...) with indices that are not coupled with any real Tx queue.
1763  *
1764  * The proper configuration of skb->queue_mapping is handled by
1765  * bnx2x_select_queue() and __skb_tx_hash().
1766  *
1767  * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1768  * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1769  */
1770 static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
1771 {
1772         int rc, tx, rx;
1773
1774         tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
1775         rx = BNX2X_NUM_ETH_QUEUES(bp);
1776
1777         /* account for the FCoE queue */
1778         if (include_cnic && !NO_FCOE(bp)) {
1779                 rx++;
1780                 tx++;
1781         }
1782
1783         rc = netif_set_real_num_tx_queues(bp->dev, tx);
1784         if (rc) {
1785                 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1786                 return rc;
1787         }
1788         rc = netif_set_real_num_rx_queues(bp->dev, rx);
1789         if (rc) {
1790                 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1791                 return rc;
1792         }
1793
1794         DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
1795                           tx, rx);
1796
1797         return rc;
1798 }
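
/*
 * Worked example for bnx2x_set_real_num_queues() (illustrative numbers):
 * with 8 ETH L2 queues and max_cos = 3 it requests tx = 24 and rx = 8;
 * including the FCoE L2 queue adds one to each count.
 */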
1799
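/*
 * bnx2x_set_rx_buf_size - compute the Rx buffer size for every queue.
 *
 * Illustrative sizing (assuming a standard 1500 byte MTU): rx_buf_size is
 * the MTU plus the Ethernet overhead, the IP header alignment padding and
 * the FW alignment margins.  When that total plus NET_SKB_PAD still fits in
 * a single page, rx_frag_size is set accordingly so the page-frag allocator
 * can be used; otherwise rx_frag_size is left at 0.
 */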
1800 static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1801 {
1802         int i;
1803
1804         for_each_queue(bp, i) {
1805                 struct bnx2x_fastpath *fp = &bp->fp[i];
1806                 u32 mtu;
1807
1808                 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1809                 if (IS_FCOE_IDX(i))
1810                         /*
1811                          * Although no IP frames are expected to arrive at
1812                          * this ring, we still want to add an
1813                          * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1814                          * overrun attack.
1815                          */
1816                         mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
1817                 else
1818                         mtu = bp->dev->mtu;
1819                 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
1820                                   IP_HEADER_ALIGNMENT_PADDING +
1821                                   ETH_OVREHEAD +
1822                                   mtu +
1823                                   BNX2X_FW_RX_ALIGN_END;
1824                 /* Note: rx_buf_size doesn't take NET_SKB_PAD into account */
1825                 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
1826                         fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
1827                 else
1828                         fp->rx_frag_size = 0;
1829         }
1830 }
1831
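/*
 * bnx2x_init_rss_pf - seed the RSS indirection table and configure RSS.
 *
 * Example (illustrative): with 4 ETH queues the default ethtool spread maps
 * indirection table entry i to client id bp->fp->cl_id + (i % 4).
 */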
1832 static int bnx2x_init_rss_pf(struct bnx2x *bp)
1833 {
1834         int i;
1835         u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1836
1837         /* Prepare the initial contents of the indirection table if RSS is
1838          * enabled
1839          */
1840         for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
1841                 bp->rss_conf_obj.ind_table[i] =
1842                         bp->fp->cl_id +
1843                         ethtool_rxfh_indir_default(i, num_eth_queues);
1844
1845         /*
1846          * For 57710 and 57711 SEARCHER configuration (rss_keys) is
1847          * per-port, so if explicit configuration is needed, do it only
1848          * for a PMF.
1849          *
1850          * For 57712 and newer on the other hand it's a per-function
1851          * configuration.
1852          */
1853         return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
1854 }
1855
1856 int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
1857                         bool config_hash)
1858 {
1859         struct bnx2x_config_rss_params params = {NULL};
1860
1861         /* Although RSS is meaningless when there is a single HW queue, we
1862          * still need it enabled in order to have HW Rx hash generated.
1863          *
1864          * if (!is_eth_multi(bp))
1865          *      bp->multi_mode = ETH_RSS_MODE_DISABLED;
1866          */
1867
1868         params.rss_obj = rss_obj;
1869
1870         __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
1871
1872         __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
1873
1874         /* RSS configuration */
1875         __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
1876         __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
1877         __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
1878         __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
1879         if (rss_obj->udp_rss_v4)
1880                 __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
1881         if (rss_obj->udp_rss_v6)
1882                 __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
1883
1884         /* Hash bits */
1885         params.rss_result_mask = MULTI_MASK;
1886
1887         memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
1888
1889         if (config_hash) {
1890                 /* RSS keys */
1891                 prandom_bytes(params.rss_key, sizeof(params.rss_key));
1892                 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
1893         }
1894
1895         return bnx2x_config_rss(bp, &params);
1896 }
1897
1898 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
1899 {
1900         struct bnx2x_func_state_params func_params = {NULL};
1901
1902         /* Prepare parameters for function state transitions */
1903         __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
1904
1905         func_params.f_obj = &bp->func_obj;
1906         func_params.cmd = BNX2X_F_CMD_HW_INIT;
1907
1908         func_params.params.hw_init.load_phase = load_code;
1909
1910         return bnx2x_func_state_change(bp, &func_params);
1911 }
1912
1913 /*
1914  * Cleans the objects that have internal lists without sending
1915  * ramrods. Should be run when interrupts are disabled.
1916  */
1917 static void bnx2x_squeeze_objects(struct bnx2x *bp)
1918 {
1919         int rc;
1920         unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
1921         struct bnx2x_mcast_ramrod_params rparam = {NULL};
1922         struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
1923
1924         /***************** Cleanup MACs' object first *************************/
1925
1926         /* Wait for the completion of the requested commands */
1927         __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
1928         /* Perform a dry cleanup */
1929         __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
1930
1931         /* Clean ETH primary MAC */
1932         __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
1933         rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
1934                                  &ramrod_flags);
1935         if (rc != 0)
1936                 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
1937
1938         /* Cleanup UC list */
1939         vlan_mac_flags = 0;
1940         __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
1941         rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
1942                                  &ramrod_flags);
1943         if (rc != 0)
1944                 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
1945
1946         /***************** Now clean mcast object *****************************/
1947         rparam.mcast_obj = &bp->mcast_obj;
1948         __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
1949
1950         /* Add a DEL command... */
1951         rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
1952         if (rc < 0)
1953                 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
1954                           rc);
1955
1956         /* ...and wait until all pending commands are cleared */
1957         rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1958         while (rc != 0) {
1959                 if (rc < 0) {
1960                         BNX2X_ERR("Failed to clean multi-cast object: %d\n",
1961                                   rc);
1962                         return;
1963                 }
1964
1965                 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1966         }
1967 }
1968
1969 #ifndef BNX2X_STOP_ON_ERROR
1970 #define LOAD_ERROR_EXIT(bp, label) \
1971         do { \
1972                 (bp)->state = BNX2X_STATE_ERROR; \
1973                 goto label; \
1974         } while (0)
1975
1976 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
1977         do { \
1978                 bp->cnic_loaded = false; \
1979                 goto label; \
1980         } while (0)
1981 #else /*BNX2X_STOP_ON_ERROR*/
1982 #define LOAD_ERROR_EXIT(bp, label) \
1983         do { \
1984                 (bp)->state = BNX2X_STATE_ERROR; \
1985                 (bp)->panic = 1; \
1986                 return -EBUSY; \
1987         } while (0)
1988 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
1989         do { \
1990                 bp->cnic_loaded = false; \
1991                 (bp)->panic = 1; \
1992                 return -EBUSY; \
1993         } while (0)
1994 #endif /*BNX2X_STOP_ON_ERROR*/
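
/*
 * LOAD_ERROR_EXIT()/LOAD_ERROR_EXIT_CNIC() provide a single error path for
 * the load flows below: a regular build jumps to the matching unwind label,
 * while a BNX2X_STOP_ON_ERROR build marks the driver as panicked and bails
 * out with -EBUSY instead of unwinding.
 */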
1995
1996 bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err)
1997 {
1998         /* build FW version dword */
1999         u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2000                     (BCM_5710_FW_MINOR_VERSION << 8) +
2001                     (BCM_5710_FW_REVISION_VERSION << 16) +
2002                     (BCM_5710_FW_ENGINEERING_VERSION << 24);
2003
2004         /* read loaded FW from chip */
2005         u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2006
2007         DP(NETIF_MSG_IFUP, "loaded fw %x, my fw %x\n", loaded_fw, my_fw);
2008
2009         if (loaded_fw != my_fw) {
2010                 if (is_err)
2011                         BNX2X_ERR("bnx2x with FW %x was already loaded, which mismatches my %x FW. aborting\n",
2012                                   loaded_fw, my_fw);
2013                 return false;
2014         }
2015
2016         return true;
2017 }
2018
2019 /**
2020  * bnx2x_bz_fp - zero content of the fastpath structure.
2021  *
2022  * @bp:         driver handle
2023  * @index:      fastpath index to be zeroed
2024  *
2025  * Makes sure the contents of the bp->fp[index].napi is kept
2026  * intact.
2027  */
2028 static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2029 {
2030         struct bnx2x_fastpath *fp = &bp->fp[index];
2031         struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[index];
2032
2033         int cos;
2034         struct napi_struct orig_napi = fp->napi;
2035         struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
2036         /* bzero bnx2x_fastpath contents */
2037         if (bp->stats_init) {
2038                 memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
2039                 memset(fp, 0, sizeof(*fp));
2040         } else {
2041                 /* Keep Queue statistics */
2042                 struct bnx2x_eth_q_stats *tmp_eth_q_stats;
2043                 struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old;
2044
2045                 tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats),
2046                                           GFP_KERNEL);
2047                 if (tmp_eth_q_stats)
2048                         memcpy(tmp_eth_q_stats, &fp_stats->eth_q_stats,
2049                                sizeof(struct bnx2x_eth_q_stats));
2050
2051                 tmp_eth_q_stats_old =
2052                         kzalloc(sizeof(struct bnx2x_eth_q_stats_old),
2053                                 GFP_KERNEL);
2054                 if (tmp_eth_q_stats_old)
2055                         memcpy(tmp_eth_q_stats_old, &fp_stats->eth_q_stats_old,
2056                                sizeof(struct bnx2x_eth_q_stats_old));
2057
2058                 memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
2059                 memset(fp, 0, sizeof(*fp));
2060
2061                 if (tmp_eth_q_stats) {
2062                         memcpy(&fp_stats->eth_q_stats, tmp_eth_q_stats,
2063                                sizeof(struct bnx2x_eth_q_stats));
2064                         kfree(tmp_eth_q_stats);
2065                 }
2066
2067                 if (tmp_eth_q_stats_old) {
2068                         memcpy(&fp_stats->eth_q_stats_old, tmp_eth_q_stats_old,
2069                                sizeof(struct bnx2x_eth_q_stats_old));
2070                         kfree(tmp_eth_q_stats_old);
2071                 }
2072
2073         }
2074
2075         /* Restore the NAPI object as it has been already initialized */
2076         fp->napi = orig_napi;
2077         fp->tpa_info = orig_tpa_info;
2078         fp->bp = bp;
2079         fp->index = index;
2080         if (IS_ETH_FP(fp))
2081                 fp->max_cos = bp->max_cos;
2082         else
2083                 /* Special queues support only one CoS */
2084                 fp->max_cos = 1;
2085
2086         /* Init txdata pointers */
2087         if (IS_FCOE_FP(fp))
2088                 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
2089         if (IS_ETH_FP(fp))
2090                 for_each_cos_in_tx_queue(fp, cos)
2091                         fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2092                                 BNX2X_NUM_ETH_QUEUES(bp) + index];
2093
2094         /*
2095          * Set the TPA flag for each queue. The TPA flag determines the queue's
2096          * minimal size, so it must be set prior to queue memory allocation.
2097          */
2098         fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
2099                                   (bp->flags & GRO_ENABLE_FLAG &&
2100                                    bnx2x_mtu_allows_gro(bp->dev->mtu)));
2101         if (bp->flags & TPA_ENABLE_FLAG)
2102                 fp->mode = TPA_MODE_LRO;
2103         else if (bp->flags & GRO_ENABLE_FLAG)
2104                 fp->mode = TPA_MODE_GRO;
2105
2106         /* We don't want TPA on an FCoE L2 ring */
2107         if (IS_FCOE_FP(fp))
2108                 fp->disable_tpa = 1;
2109 }
2110
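/**
 * bnx2x_load_cnic - bring up the CNIC (FCoE/iSCSI) related resources.
 *
 * @bp:         driver handle
 *
 * Allocates the CNIC memory, enables the CNIC NAPI objects, sets up the
 * CNIC queues and timer scan, and notifies the CNIC driver on success.
 */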
2111 int bnx2x_load_cnic(struct bnx2x *bp)
2112 {
2113         int i, rc, port = BP_PORT(bp);
2114
2115         DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2116
2117         mutex_init(&bp->cnic_mutex);
2118
2119         rc = bnx2x_alloc_mem_cnic(bp);
2120         if (rc) {
2121                 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2122                 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2123         }
2124
2125         rc = bnx2x_alloc_fp_mem_cnic(bp);
2126         if (rc) {
2127                 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2128                 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2129         }
2130
2131         /* Update the number of queues with the cnic queues */
2132         rc = bnx2x_set_real_num_queues(bp, 1);
2133         if (rc) {
2134                 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2135                 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2136         }
2137
2138         /* Add all CNIC NAPI objects */
2139         bnx2x_add_all_napi_cnic(bp);
2140         DP(NETIF_MSG_IFUP, "cnic napi added\n");
2141         bnx2x_napi_enable_cnic(bp);
2142
2143         rc = bnx2x_init_hw_func_cnic(bp);
2144         if (rc)
2145                 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2146
2147         bnx2x_nic_init_cnic(bp);
2148
2149         /* Enable Timer scan */
2150         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2151
2152         for_each_cnic_queue(bp, i) {
2153                 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2154                 if (rc) {
2155                         BNX2X_ERR("Queue setup failed\n");
2156                         LOAD_ERROR_EXIT(bp, load_error_cnic2);
2157                 }
2158         }
2159
2160         /* Initialize Rx filter. */
2161         netif_addr_lock_bh(bp->dev);
2162         bnx2x_set_rx_mode(bp->dev);
2163         netif_addr_unlock_bh(bp->dev);
2164
2165         /* re-read iscsi info */
2166         bnx2x_get_iscsi_info(bp);
2167         bnx2x_setup_cnic_irq_info(bp);
2168         bnx2x_setup_cnic_info(bp);
2169         bp->cnic_loaded = true;
2170         if (bp->state == BNX2X_STATE_OPEN)
2171                 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2172
2173
2174         DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2175
2176         return 0;
2177
2178 #ifndef BNX2X_STOP_ON_ERROR
2179 load_error_cnic2:
2180         /* Disable Timer scan */
2181         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2182
2183 load_error_cnic1:
2184         bnx2x_napi_disable_cnic(bp);
2185         /* Update the number of queues without the cnic queues */
2186         rc = bnx2x_set_real_num_queues(bp, 0);
2187         if (rc)
2188                 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2189 load_error_cnic0:
2190         BNX2X_ERR("CNIC-related load failed\n");
2191         bnx2x_free_fp_mem_cnic(bp);
2192         bnx2x_free_mem_cnic(bp);
2193         return rc;
2194 #endif /* ! BNX2X_STOP_ON_ERROR */
2195 }
2196
2197
2198 /* must be called with rtnl_lock */
2199 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2200 {
2201         int port = BP_PORT(bp);
2202         u32 load_code;
2203         int i, rc;
2204
2205         DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2206         DP(NETIF_MSG_IFUP,
2207            "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2208
2209 #ifdef BNX2X_STOP_ON_ERROR
2210         if (unlikely(bp->panic)) {
2211                 BNX2X_ERR("Can't load NIC when there is panic\n");
2212                 return -EPERM;
2213         }
2214 #endif
2215
2216         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2217
2218         /* Set the initial link reported state to link down */
2219         bnx2x_acquire_phy_lock(bp);
2220         memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2221         __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2222                 &bp->last_reported_link.link_report_flags);
2223         bnx2x_release_phy_lock(bp);
2224
2225         /* must be called before memory allocation and HW init */
2226         bnx2x_ilt_set_info(bp);
2227
2228         /*
2229          * Zero fastpath structures while preserving the invariants that are
2230          * set up only once: the napi struct, fp index, max_cos and bp pointer.
2231          * Also set fp->disable_tpa and txdata_ptr.
2232          */
2233         DP(NETIF_MSG_IFUP, "num queues: %d\n", bp->num_queues);
2234         for_each_queue(bp, i)
2235                 bnx2x_bz_fp(bp, i);
2236         memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2237                                   bp->num_cnic_queues) *
2238                                   sizeof(struct bnx2x_fp_txdata));
2239
2240         bp->fcoe_init = false;
2241
2242         /* Set the receive queues buffer size */
2243         bnx2x_set_rx_buf_size(bp);
2244
2245         if (bnx2x_alloc_mem(bp))
2246                 return -ENOMEM;
2247
2248         /* Since bnx2x_alloc_mem() may update bp->num_queues,
2249          * bnx2x_set_real_num_queues() must always come after it.
2250          * At this stage CNIC queues are not counted.
2251          */
2252         rc = bnx2x_set_real_num_queues(bp, 0);
2253         if (rc) {
2254                 BNX2X_ERR("Unable to set real_num_queues\n");
2255                 LOAD_ERROR_EXIT(bp, load_error0);
2256         }
2257
2258         /* Configure multi-CoS mappings in the kernel.
2259          * This configuration may be overridden by a multi-class queue
2260          * discipline or by a DCBX negotiation result.
2261          */
2262         bnx2x_setup_tc(bp->dev, bp->max_cos);
2263
2264         /* Add all NAPI objects */
2265         bnx2x_add_all_napi(bp);
2266         DP(NETIF_MSG_IFUP, "napi added\n");
2267         bnx2x_napi_enable(bp);
2268
2269         /* set pf load just before approaching the MCP */
2270         bnx2x_set_pf_load(bp);
2271
2272         /* Send the LOAD_REQUEST command to the MCP.
2273          * The returned LOAD command type indicates whether this is the
2274          * first port to be initialized; if so, the common blocks should
2275          * be initialized as well, otherwise not.
2276          */
2277         if (!BP_NOMCP(bp)) {
2278                 /* init fw_seq */
2279                 bp->fw_seq =
2280                         (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2281                          DRV_MSG_SEQ_NUMBER_MASK);
2282                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2283
2284                 /* Get current FW pulse sequence */
2285                 bp->fw_drv_pulse_wr_seq =
2286                         (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2287                          DRV_PULSE_SEQ_MASK);
2288                 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2289
2290                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
2291                                              DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
2292                 if (!load_code) {
2293                         BNX2X_ERR("MCP response failure, aborting\n");
2294                         rc = -EBUSY;
2295                         LOAD_ERROR_EXIT(bp, load_error1);
2296                 }
2297                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2298                         BNX2X_ERR("Driver load refused\n");
2299                         rc = -EBUSY; /* other port in diagnostic mode */
2300                         LOAD_ERROR_EXIT(bp, load_error1);
2301                 }
2302                 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2303                     load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2304                         /* abort nic load if version mismatch */
2305                         if (!bnx2x_test_firmware_version(bp, true)) {
2306                                 rc = -EBUSY;
2307                                 LOAD_ERROR_EXIT(bp, load_error2);
2308                         }
2309                 }
2310
2311         } else {
2312                 int path = BP_PATH(bp);
2313
2314                 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d]      %d, %d, %d\n",
2315                    path, load_count[path][0], load_count[path][1],
2316                    load_count[path][2]);
2317                 load_count[path][0]++;
2318                 load_count[path][1 + port]++;
2319                 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d]  %d, %d, %d\n",
2320                    path, load_count[path][0], load_count[path][1],
2321                    load_count[path][2]);
2322                 if (load_count[path][0] == 1)
2323                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
2324                 else if (load_count[path][1 + port] == 1)
2325                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
2326                 else
2327                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
2328         }
2329
2330         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2331             (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2332             (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2333                 bp->port.pmf = 1;
2334                 /*
2335                  * We need the barrier to ensure the ordering between the
2336                  * writing to bp->port.pmf here and reading it from the
2337                  * bnx2x_periodic_task().
2338                  */
2339                 smp_mb();
2340         } else
2341                 bp->port.pmf = 0;
2342
2343         DP(NETIF_MSG_IFUP, "pmf %d\n", bp->port.pmf);
2344
2345         /* Init Function state controlling object */
2346         bnx2x__init_func_obj(bp);
2347
2348         /* Initialize HW */
2349         rc = bnx2x_init_hw(bp, load_code);
2350         if (rc) {
2351                 BNX2X_ERR("HW init failed, aborting\n");
2352                 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2353                 LOAD_ERROR_EXIT(bp, load_error2);
2354         }
2355
2356         /* Connect to IRQs */
2357         rc = bnx2x_setup_irqs(bp);
2358         if (rc) {
2359                 BNX2X_ERR("IRQs setup failed\n");
2360                 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2361                 LOAD_ERROR_EXIT(bp, load_error2);
2362         }
2363
2364         /* Setup NIC internals and enable interrupts */
2365         bnx2x_nic_init(bp, load_code);
2366
2367         /* Init per-function objects */
2368         bnx2x_init_bp_objs(bp);
2369
2370         if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2371             (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2372             (bp->common.shmem2_base)) {
2373                 if (SHMEM2_HAS(bp, dcc_support))
2374                         SHMEM2_WR(bp, dcc_support,
2375                                   (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2376                                    SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2377                 if (SHMEM2_HAS(bp, afex_driver_support))
2378                         SHMEM2_WR(bp, afex_driver_support,
2379                                   SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2380         }
2381
2382         /* Set AFEX default VLAN tag to an invalid value */
2383         bp->afex_def_vlan_tag = -1;
2384
2385         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2386         rc = bnx2x_func_start(bp);
2387         if (rc) {
2388                 BNX2X_ERR("Function start failed!\n");
2389                 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2390                 LOAD_ERROR_EXIT(bp, load_error3);
2391         }
2392
2393         /* Send LOAD_DONE command to MCP */
2394         if (!BP_NOMCP(bp)) {
2395                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2396                 if (!load_code) {
2397                         BNX2X_ERR("MCP response failure, aborting\n");
2398                         rc = -EBUSY;
2399                         LOAD_ERROR_EXIT(bp, load_error3);
2400                 }
2401         }
2402
2403         rc = bnx2x_setup_leading(bp);
2404         if (rc) {
2405                 BNX2X_ERR("Setup leading failed!\n");
2406                 LOAD_ERROR_EXIT(bp, load_error3);
2407         }
2408
2409         for_each_nondefault_eth_queue(bp, i) {
2410                 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2411                 if (rc) {
2412                         BNX2X_ERR("Queue setup failed\n");
2413                         LOAD_ERROR_EXIT(bp, load_error3);
2414                 }
2415         }
2416
2417         rc = bnx2x_init_rss_pf(bp);
2418         if (rc) {
2419                 BNX2X_ERR("PF RSS init failed\n");
2420                 LOAD_ERROR_EXIT(bp, load_error3);
2421         }
2422
2423         /* Now when Clients are configured we are ready to work */
2424         bp->state = BNX2X_STATE_OPEN;
2425
2426         /* Configure a ucast MAC */
2427         rc = bnx2x_set_eth_mac(bp, true);
2428         if (rc) {
2429                 BNX2X_ERR("Setting Ethernet MAC failed\n");
2430                 LOAD_ERROR_EXIT(bp, load_error3);
2431         }
2432
2433         if (bp->pending_max) {
2434                 bnx2x_update_max_mf_config(bp, bp->pending_max);
2435                 bp->pending_max = 0;
2436         }
2437
2438         if (bp->port.pmf)
2439                 bnx2x_initial_phy_init(bp, load_mode);
2440         bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
2441
2442         /* Start fast path */
2443
2444         /* Initialize Rx filter. */
2445         netif_addr_lock_bh(bp->dev);
2446         bnx2x_set_rx_mode(bp->dev);
2447         netif_addr_unlock_bh(bp->dev);
2448
2449         /* Start the Tx */
2450         switch (load_mode) {
2451         case LOAD_NORMAL:
2452                 /* Tx queues should only be re-enabled */
2453                 netif_tx_wake_all_queues(bp->dev);
2454                 break;
2455
2456         case LOAD_OPEN:
2457                 netif_tx_start_all_queues(bp->dev);
2458                 smp_mb__after_clear_bit();
2459                 break;
2460
2461         case LOAD_DIAG:
2462         case LOAD_LOOPBACK_EXT:
2463                 bp->state = BNX2X_STATE_DIAG;
2464                 break;
2465
2466         default:
2467                 break;
2468         }
2469
2470         if (bp->port.pmf)
2471                 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
2472         else
2473                 bnx2x__link_status_update(bp);
2474
2475         /* start the timer */
2476         mod_timer(&bp->timer, jiffies + bp->current_interval);
2477
2478         if (CNIC_ENABLED(bp))
2479                 bnx2x_load_cnic(bp);
2480
2481         /* mark driver is loaded in shmem2 */
2482         if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
2483                 u32 val;
2484                 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2485                 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2486                           val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2487                           DRV_FLAGS_CAPABILITIES_LOADED_L2);
2488         }
2489
2490         /* Wait for all pending SP commands to complete */
2491         if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2492                 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2493                 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
2494                 return -EBUSY;
2495         }
2496
2497         /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2498         if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2499                 bnx2x_dcbx_init(bp, false);
2500
2501         DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2502
2503         return 0;
2504
2505 #ifndef BNX2X_STOP_ON_ERROR
2506 load_error3:
2507         bnx2x_int_disable_sync(bp, 1);
2508
2509         /* Clean queueable objects */
2510         bnx2x_squeeze_objects(bp);
2511
2512         /* Free SKBs, SGEs, TPA pool and driver internals */
2513         bnx2x_free_skbs(bp);
2514         for_each_rx_queue(bp, i)
2515                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2516
2517         /* Release IRQs */
2518         bnx2x_free_irq(bp);
2519 load_error2:
2520         if (!BP_NOMCP(bp)) {
2521                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2522                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2523         }
2524
2525         bp->port.pmf = 0;
2526 load_error1:
2527         bnx2x_napi_disable(bp);
2528         /* clear pf_load status, as it was already set */
2529         bnx2x_clear_pf_load(bp);
2530 load_error0:
2531         bnx2x_free_mem(bp);
2532
2533         return rc;
2534 #endif /* ! BNX2X_STOP_ON_ERROR */
2535 }
2536
2537 /* must be called with rtnl_lock */
2538 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2539 {
2540         int i;
2541         bool global = false;
2542
2543         DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2544
2545         /* mark driver is unloaded in shmem2 */
2546         if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
2547                 u32 val;
2548                 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2549                 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2550                           val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2551         }
2552
2553         if ((bp->state == BNX2X_STATE_CLOSED) ||
2554             (bp->state == BNX2X_STATE_ERROR)) {
2555                 /* We can get here if the driver has been unloaded
2556                  * during parity error recovery and is either waiting for a
2557                  * leader to complete or for other functions to unload and
2558                  * then ifdown has been issued. In this case we want to
2559                  * unload and let other functions complete a recovery
2560                  * process.
2561                  */
2562                 bp->recovery_state = BNX2X_RECOVERY_DONE;
2563                 bp->is_leader = 0;
2564                 bnx2x_release_leader_lock(bp);
2565                 smp_mb();
2566
2567                 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2568                 BNX2X_ERR("Can't unload in closed or error state\n");
2569                 return -EINVAL;
2570         }
2571
2572         /*
2573          * It's important to set bp->state to a value different from
2574          * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2575          * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2576          */
2577         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2578         smp_mb();
2579
2580         if (CNIC_LOADED(bp))
2581                 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2582
2583         /* Stop Tx */
2584         bnx2x_tx_disable(bp);
2585         netdev_reset_tc(bp->dev);
2586
2587         bp->rx_mode = BNX2X_RX_MODE_NONE;
2588
2589         del_timer_sync(&bp->timer);
2590
2591         /* Set ALWAYS_ALIVE bit in shmem */
2592         bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2593
2594         bnx2x_drv_pulse(bp);
2595
2596         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2597         bnx2x_save_statistics(bp);
2598
2599         /* Cleanup the chip if needed */
2600         if (unload_mode != UNLOAD_RECOVERY)
2601                 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
2602         else {
2603                 /* Send the UNLOAD_REQUEST to the MCP */
2604                 bnx2x_send_unload_req(bp, unload_mode);
2605
2606                 /*
2607                  * Prevent transactions to the host from the functions on the
2608                  * engine that doesn't reset global blocks in case of global
2609                  * attention once global blocks are reset and the gates are
2610                  * opened (the engine whose leader will perform the recovery
2611                  * last).
2612                  */
2613                 if (!CHIP_IS_E1x(bp))
2614                         bnx2x_pf_disable(bp);
2615
2616                 /* Disable HW interrupts, NAPI */
2617                 bnx2x_netif_stop(bp, 1);
2618                 /* Delete all NAPI objects */
2619                 bnx2x_del_all_napi(bp);
2620                 if (CNIC_LOADED(bp))
2621                         bnx2x_del_all_napi_cnic(bp);
2622                 /* Release IRQs */
2623                 bnx2x_free_irq(bp);
2624
2625                 /* Report UNLOAD_DONE to MCP */
2626                 bnx2x_send_unload_done(bp, false);
2627         }
2628
2629         /*
2630          * At this stage no more interrupts will arrive, so we may safely clean
2631          * the queueable objects here in case they failed to get cleaned so far.
2632          */
2633         bnx2x_squeeze_objects(bp);
2634
2635         /* There should be no more pending SP commands at this stage */
2636         bp->sp_state = 0;
2637
2638         bp->port.pmf = 0;
2639
2640         /* Free SKBs, SGEs, TPA pool and driver internals */
2641         bnx2x_free_skbs(bp);
2642         if (CNIC_LOADED(bp))
2643                 bnx2x_free_skbs_cnic(bp);
2644         for_each_rx_queue(bp, i)
2645                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2646
2647         if (CNIC_LOADED(bp)) {
2648                 bnx2x_free_fp_mem_cnic(bp);
2649                 bnx2x_free_mem_cnic(bp);
2650         }
2651         bnx2x_free_mem(bp);
2652
2653         bp->state = BNX2X_STATE_CLOSED;
2654         bp->cnic_loaded = false;
2655
2656         /* Check if there are pending parity attentions. If so, set
2657          * RECOVERY_IN_PROGRESS.
2658          */
2659         if (bnx2x_chk_parity_attn(bp, &global, false)) {
2660                 bnx2x_set_reset_in_progress(bp);
2661
2662                 /* Set RESET_IS_GLOBAL if needed */
2663                 if (global)
2664                         bnx2x_set_reset_global(bp);
2665         }
2666
2667
2668         /* The last driver must disable the "close the gate" functionality if
2669          * there is no parity attention or "process kill" pending.
2670          */
2671         if (!bnx2x_clear_pf_load(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
2672                 bnx2x_disable_close_the_gate(bp);
2673
2674         DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
2675
2676         return 0;
2677 }
2678
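/**
 * bnx2x_set_power_state - put the device into the requested PCI power state.
 *
 * @bp:         driver handle
 * @state:      PCI_D0 or PCI_D3hot
 *
 * D3hot is entered only when this is the last user of the device and the
 * chip is not an emulation/FPGA platform; PME is kept enabled when WoL is
 * configured.
 */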
2679 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
2680 {
2681         u16 pmcsr;
2682
2683         /* If there is no power capability, silently succeed */
2684         if (!bp->pm_cap) {
2685                 BNX2X_DEV_INFO("No power capability. Breaking.\n");
2686                 return 0;
2687         }
2688
2689         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2690
2691         switch (state) {
2692         case PCI_D0:
2693                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2694                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2695                                        PCI_PM_CTRL_PME_STATUS));
2696
2697                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2698                         /* delay required during transition out of D3hot */
2699                         msleep(20);
2700                 break;
2701
2702         case PCI_D3hot:
2703                 /* If there are other clients above, don't
2704                    shut down the power */
2705                 if (atomic_read(&bp->pdev->enable_cnt) != 1)
2706                         return 0;
2707                 /* Don't shut down the power for emulation and FPGA */
2708                 if (CHIP_REV_IS_SLOW(bp))
2709                         return 0;
2710
2711                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2712                 pmcsr |= 3;
2713
2714                 if (bp->wol)
2715                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2716
2717                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2718                                       pmcsr);
2719
2720                 /* No more memory access after this point until
2721                  * the device is brought back to D0.
2722                  */
2723                 break;
2724
2725         default:
2726                 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
2727                 return -EINVAL;
2728         }
2729         return 0;
2730 }
2731
2732 /*
2733  * net_device service functions
2734  */
2735 int bnx2x_poll(struct napi_struct *napi, int budget)
2736 {
2737         int work_done = 0;
2738         u8 cos;
2739         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
2740                                                  napi);
2741         struct bnx2x *bp = fp->bp;
2742
2743         while (1) {
2744 #ifdef BNX2X_STOP_ON_ERROR
2745                 if (unlikely(bp->panic)) {
2746                         napi_complete(napi);
2747                         return 0;
2748                 }
2749 #endif
2750
2751                 for_each_cos_in_tx_queue(fp, cos)
2752                         if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
2753                                 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
2754
2755
2756                 if (bnx2x_has_rx_work(fp)) {
2757                         work_done += bnx2x_rx_int(fp, budget - work_done);
2758
2759                         /* must not complete if we consumed full budget */
2760                         if (work_done >= budget)
2761                                 break;
2762                 }
2763
2764                 /* Fall out from the NAPI loop if needed */
2765                 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2766
2767                         /* No need to update SB for FCoE L2 ring as long as
2768                          * it's connected to the default SB and the SB
2769                          * has been updated when NAPI was scheduled.
2770                          */
2771                         if (IS_FCOE_FP(fp)) {
2772                                 napi_complete(napi);
2773                                 break;
2774                         }
2775                         bnx2x_update_fpsb_idx(fp);
2776                         /* bnx2x_has_rx_work() reads the status block,
2777                          * thus we need to ensure that status block indices
2778                          * have been actually read (bnx2x_update_fpsb_idx)
2779                          * prior to this check (bnx2x_has_rx_work) so that
2780                          * we won't write the "newer" value of the status block
2781                          * to IGU (if there was a DMA right after
2782                          * bnx2x_has_rx_work and if there is no rmb, the memory
2783                          * reading (bnx2x_update_fpsb_idx) may be postponed
2784                          * to right before bnx2x_ack_sb). In this case there
2785                          * will never be another interrupt until there is
2786                          * another update of the status block, while there
2787                          * is still unhandled work.
2788                          */
2789                         rmb();
2790
2791                         if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2792                                 napi_complete(napi);
2793                                 /* Re-enable interrupts */
2794                                 DP(NETIF_MSG_RX_STATUS,
2795                                    "Update index to %d\n", fp->fp_hc_idx);
2796                                 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
2797                                              le16_to_cpu(fp->fp_hc_idx),
2798                                              IGU_INT_ENABLE, 1);
2799                                 break;
2800                         }
2801                 }
2802         }
2803
2804         return work_done;
2805 }
2806
2807 /* We split the first BD into a headers BD and a data BD
2808  * to ease the pain of our fellow microcode engineers;
2809  * we use one mapping for both BDs.
2810  */
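/*
 * Illustrative example (assumed numbers): for a TSO packet with hlen = 66
 * bytes of headers in the first BD, the first BD is shrunk to describe just
 * those 66 bytes and a new data BD, reusing the same DMA mapping at offset
 * hlen, describes the remaining old_len - hlen bytes.
 */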
2811 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
2812                                    struct bnx2x_fp_txdata *txdata,
2813                                    struct sw_tx_bd *tx_buf,
2814                                    struct eth_tx_start_bd **tx_bd, u16 hlen,
2815                                    u16 bd_prod, int nbd)
2816 {
2817         struct eth_tx_start_bd *h_tx_bd = *tx_bd;
2818         struct eth_tx_bd *d_tx_bd;
2819         dma_addr_t mapping;
2820         int old_len = le16_to_cpu(h_tx_bd->nbytes);
2821
2822         /* first fix first BD */
2823         h_tx_bd->nbd = cpu_to_le16(nbd);
2824         h_tx_bd->nbytes = cpu_to_le16(hlen);
2825
2826         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x) nbd %d\n",
2827            h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo, h_tx_bd->nbd);
2828
2829         /* now get a new data BD
2830          * (after the pbd) and fill it */
2831         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2832         d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
2833
2834         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
2835                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
2836
2837         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2838         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2839         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
2840
2841         /* this marks the BD as one that has no individual mapping */
2842         tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
2843
2844         DP(NETIF_MSG_TX_QUEUED,
2845            "TSO split data size is %d (%x:%x)\n",
2846            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
2847
2848         /* update tx_bd */
2849         *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
2850
2851         return bd_prod;
2852 }
2853
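/**
 * bnx2x_csum_fix - adjust a checksum whose start offset is off by 'fix' bytes
 *
 * @t_header:   transport header
 * @csum:       checksum reported by the stack (SKB_CS)
 * @fix:        signed byte distance between the checksum start and @t_header
 *
 * Removes the contribution of the @fix bytes preceding @t_header (or adds
 * back the -@fix bytes starting at it), folds the result and returns it
 * byte-swapped, ready for the parse BD.
 */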
2854 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
2855 {
2856         if (fix > 0)
2857                 csum = (u16) ~csum_fold(csum_sub(csum,
2858                                 csum_partial(t_header - fix, fix, 0)));
2859
2860         else if (fix < 0)
2861                 csum = (u16) ~csum_fold(csum_add(csum,
2862                                 csum_partial(t_header, -fix, 0)));
2863
2864         return swab16(csum);
2865 }
2866
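/* Return a bitmask of XMIT_* flags describing the checksum and GSO
 * handling this skb requires.
 */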
2867 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
2868 {
2869         u32 rc;
2870
2871         if (skb->ip_summed != CHECKSUM_PARTIAL)
2872                 rc = XMIT_PLAIN;
2873
2874         else {
2875                 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
2876                         rc = XMIT_CSUM_V6;
2877                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2878                                 rc |= XMIT_CSUM_TCP;
2879
2880                 } else {
2881                         rc = XMIT_CSUM_V4;
2882                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2883                                 rc |= XMIT_CSUM_TCP;
2884                 }
2885         }
2886
2887         if (skb_is_gso_v6(skb))
2888                 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
2889         else if (skb_is_gso(skb))
2890                 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
2891
2892         return rc;
2893 }
2894
2895 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2896 /* check if packet requires linearization (packet is too fragmented)
2897    no need to check fragmentation if page size > 8K (there will be no
2898    violation of FW restrictions) */
2899 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
2900                              u32 xmit_type)
2901 {
2902         int to_copy = 0;
2903         int hlen = 0;
2904         int first_bd_sz = 0;
2905
2906         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
2907         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
2908
2909                 if (xmit_type & XMIT_GSO) {
2910                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
2911                         /* Check if LSO packet needs to be copied:
2912                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
2913                         int wnd_size = MAX_FETCH_BD - 3;
2914                         /* Number of windows to check */
2915                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
2916                         int wnd_idx = 0;
2917                         int frag_idx = 0;
2918                         u32 wnd_sum = 0;
2919
2920                         /* Headers length */
2921                         hlen = (int)(skb_transport_header(skb) - skb->data) +
2922                                 tcp_hdrlen(skb);
2923
2924                         /* Amount of data (w/o headers) on linear part of SKB */
2925                         first_bd_sz = skb_headlen(skb) - hlen;
2926
2927                         wnd_sum  = first_bd_sz;
2928
2929                         /* Calculate the first sum - it's special */
2930                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
2931                                 wnd_sum +=
2932                                         skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
2933
2934                         /* If there was data in the linear part - check it */
2935                         if (first_bd_sz > 0) {
2936                                 if (unlikely(wnd_sum < lso_mss)) {
2937                                         to_copy = 1;
2938                                         goto exit_lbl;
2939                                 }
2940
2941                                 wnd_sum -= first_bd_sz;
2942                         }
2943
2944                         /* Others are easier: run through the frag list and
2945                            check all windows */
2946                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
2947                                 wnd_sum +=
2948                           skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
2949
2950                                 if (unlikely(wnd_sum < lso_mss)) {
2951                                         to_copy = 1;
2952                                         break;
2953                                 }
2954                                 wnd_sum -=
2955                                         skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
2956                         }
2957                 } else {
2958                         /* in the non-LSO case a too fragmented packet
2959                            should always be linearized */
2960                         to_copy = 1;
2961                 }
2962         }
2963
2964 exit_lbl:
2965         if (unlikely(to_copy))
2966                 DP(NETIF_MSG_TX_QUEUED,
2967                    "Linearization IS REQUIRED for %s packet. num_frags %d  hlen %d  first_bd_sz %d\n",
2968                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
2969                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
2970
2971         return to_copy;
2972 }
2973 #endif
2974
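/* Write the LSO MSS into the E2 parsing data and, for GSO over IPv6,
 * flag packets whose next header is NEXTHDR_IPV6 as carrying an
 * extension header.
 */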
2975 static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
2976                                         u32 xmit_type)
2977 {
2978         *parsing_data |= (skb_shinfo(skb)->gso_size <<
2979                               ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
2980                               ETH_TX_PARSE_BD_E2_LSO_MSS;
2981         if ((xmit_type & XMIT_GSO_V6) &&
2982             (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
2983                 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
2984 }
2985
2986 /**
2987  * bnx2x_set_pbd_gso - update PBD in GSO case.
2988  *
2989  * @skb:        packet skb
2990  * @pbd:        parse BD
2991  * @xmit_type:  xmit flags
2992  */
2993 static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
2994                                      struct eth_tx_parse_bd_e1x *pbd,
2995                                      u32 xmit_type)
2996 {
2997         pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2998         pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
2999         pbd->tcp_flags = pbd_tcp_flags(skb);
3000
3001         if (xmit_type & XMIT_GSO_V4) {
3002                 pbd->ip_id = swab16(ip_hdr(skb)->id);
3003                 pbd->tcp_pseudo_csum =
3004                         swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3005                                                   ip_hdr(skb)->daddr,
3006                                                   0, IPPROTO_TCP, 0));
3007
3008         } else
3009                 pbd->tcp_pseudo_csum =
3010                         swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3011                                                 &ipv6_hdr(skb)->daddr,
3012                                                 0, IPPROTO_TCP, 0));
3013
3014         pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
3015 }
3016
3017 /**
3018  * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
3019  *
3020  * @bp:                 driver handle
3021  * @skb:                packet skb
3022  * @parsing_data:       data to be updated
3023  * @xmit_type:          xmit flags
3024  *
3025  * 57712 related
3026  */
3027 static inline  u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3028         u32 *parsing_data, u32 xmit_type)
3029 {
3030         *parsing_data |=
3031                         ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
3032                         ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
3033                         ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
3034
3035         if (xmit_type & XMIT_CSUM_TCP) {
3036                 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3037                         ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3038                         ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3039
3040                 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
3041         } else
3042                 /* We support checksum offload for TCP and UDP only.
3043                  * No need to pass the UDP header length - it's a constant.
3044                  */
3045                 return skb_transport_header(skb) +
3046                                 sizeof(struct udphdr) - skb->data;
3047 }
3048
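/* Set the checksum flags in the start BD: L4 checksum is always requested,
 * IP checksum for IPv4, the IPv6 flag otherwise, and IS_UDP when the L4
 * protocol is not TCP.
 */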
3049 static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3050         struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
3051 {
3052         tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3053
3054         if (xmit_type & XMIT_CSUM_V4)
3055                 tx_start_bd->bd_flags.as_bitfield |=
3056                                         ETH_TX_BD_FLAGS_IP_CSUM;
3057         else
3058                 tx_start_bd->bd_flags.as_bitfield |=
3059                                         ETH_TX_BD_FLAGS_IPV6;
3060
3061         if (!(xmit_type & XMIT_CSUM_TCP))
3062                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
3063 }
3064
3065 /**
3066  * bnx2x_set_pbd_csum - update PBD with checksum and return header length
3067  *
3068  * @bp:         driver handle
3069  * @skb:        packet skb
3070  * @pbd:        parse BD to be updated
3071  * @xmit_type:  xmit flags
3072  */
3073 static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3074         struct eth_tx_parse_bd_e1x *pbd,
3075         u32 xmit_type)
3076 {
3077         u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
3078
3079         /* for now NS flag is not used in Linux */
3080         pbd->global_data =
3081                 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3082                          ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
3083
3084         pbd->ip_hlen_w = (skb_transport_header(skb) -
3085                         skb_network_header(skb)) >> 1;
3086
3087         hlen += pbd->ip_hlen_w;
3088
3089         /* We support checksum offload for TCP and UDP only */
3090         if (xmit_type & XMIT_CSUM_TCP)
3091                 hlen += tcp_hdrlen(skb) / 2;
3092         else
3093                 hlen += sizeof(struct udphdr) / 2;
3094
3095         pbd->total_hlen_w = cpu_to_le16(hlen);
3096         hlen = hlen*2;
3097
3098         if (xmit_type & XMIT_CSUM_TCP) {
3099                 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
3100
3101         } else {
3102                 s8 fix = SKB_CS_OFF(skb); /* signed! */
3103
3104                 DP(NETIF_MSG_TX_QUEUED,
3105                    "hlen %d  fix %d  csum before fix %x\n",
3106                    le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3107
3108                 /* HW bug: fixup the CSUM */
3109                 pbd->tcp_pseudo_csum =
3110                         bnx2x_csum_fix(skb_transport_header(skb),
3111                                        SKB_CS(skb), fix);
3112
3113                 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3114                    pbd->tcp_pseudo_csum);
3115         }
3116
3117         return hlen;
3118 }
3119
3120 /* called with netif_tx_lock
3121  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3122  * netif_wake_queue()
3123  */
3124 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3125 {
3126         struct bnx2x *bp = netdev_priv(dev);
3127
3128         struct netdev_queue *txq;
3129         struct bnx2x_fp_txdata *txdata;
3130         struct sw_tx_bd *tx_buf;
3131         struct eth_tx_start_bd *tx_start_bd, *first_bd;
3132         struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
3133         struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
3134         struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
3135         u32 pbd_e2_parsing_data = 0;
3136         u16 pkt_prod, bd_prod;
3137         int nbd, txq_index;
3138         dma_addr_t mapping;
3139         u32 xmit_type = bnx2x_xmit_type(bp, skb);
3140         int i;
3141         u8 hlen = 0;
3142         __le16 pkt_size = 0;
3143         struct ethhdr *eth;
3144         u8 mac_type = UNICAST_ADDRESS;
3145
3146 #ifdef BNX2X_STOP_ON_ERROR
3147         if (unlikely(bp->panic))
3148                 return NETDEV_TX_BUSY;
3149 #endif
3150
3151         txq_index = skb_get_queue_mapping(skb);
3152         txq = netdev_get_tx_queue(dev, txq_index);
3153
3154         BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
3155
3156         txdata = &bp->bnx2x_txq[txq_index];
3157
3158         /* enable this debug print to view the transmission queue being used
3159         DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
3160            txq_index, fp_index, txdata_index); */
3161
3162         /* enable this debug print to view the transmission details
3163         DP(NETIF_MSG_TX_QUEUED,
3164            "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
3165            txdata->cid, fp_index, txdata_index, txdata, fp); */
3166
3167         if (unlikely(bnx2x_tx_avail(bp, txdata) <
3168                         skb_shinfo(skb)->nr_frags +
3169                         BDS_PER_TX_PKT +
3170                         NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
3171                 /* Handle special storage cases separately */
3172                 if (txdata->tx_ring_size == 0) {
3173                         struct bnx2x_eth_q_stats *q_stats =
3174                                 bnx2x_fp_qstats(bp, txdata->parent_fp);
3175                         q_stats->driver_filtered_tx_pkt++;
3176                         dev_kfree_skb(skb);
3177                         return NETDEV_TX_OK;
3178                 }
3179                 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3180                 netif_tx_stop_queue(txq);
3181                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
3182
3183                 return NETDEV_TX_BUSY;
3184         }
3185
3186         DP(NETIF_MSG_TX_QUEUED,
3187            "queue[%d]: SKB: summed %x  protocol %x protocol(%x,%x) gso type %x  xmit_type %x\n",
3188            txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
3189            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
3190
3191         eth = (struct ethhdr *)skb->data;
3192
3193         /* set flag according to packet type (UNICAST_ADDRESS is default) */
3194         if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3195                 if (is_broadcast_ether_addr(eth->h_dest))
3196                         mac_type = BROADCAST_ADDRESS;
3197                 else
3198                         mac_type = MULTICAST_ADDRESS;
3199         }
3200
3201 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3202         /* First, check if we need to linearize the skb (due to FW
3203            restrictions). No need to check fragmentation if page size > 8K
3204            (there will be no violation of FW restrictions) */
3205         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3206                 /* Statistics of linearization */
3207                 bp->lin_cnt++;
3208                 if (skb_linearize(skb) != 0) {
3209                         DP(NETIF_MSG_TX_QUEUED,
3210                            "SKB linearization failed - silently dropping this SKB\n");
3211                         dev_kfree_skb_any(skb);
3212                         return NETDEV_TX_OK;
3213                 }
3214         }
3215 #endif
3216         /* Map skb linear data for DMA */
3217         mapping = dma_map_single(&bp->pdev->dev, skb->data,
3218                                  skb_headlen(skb), DMA_TO_DEVICE);
3219         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3220                 DP(NETIF_MSG_TX_QUEUED,
3221                    "SKB mapping failed - silently dropping this SKB\n");
3222                 dev_kfree_skb_any(skb);
3223                 return NETDEV_TX_OK;
3224         }
3225         /*
3226          * Please read carefully. First we use one BD which we mark as start,
3227          * then we have a parsing info BD (used for TSO or xsum),
3228          * and only then we have the rest of the TSO BDs.
3229          * (don't forget to mark the last one as last,
3230          * and to unmap only AFTER you write to the BD ...)
3231          * And above all, all PBD sizes are in words - NOT DWORDS!
3232          */
3233
3234         /* get current pkt produced now - advance it just before sending packet
3235          * since mapping of pages may fail and cause packet to be dropped
3236          */
3237         pkt_prod = txdata->tx_pkt_prod;
3238         bd_prod = TX_BD(txdata->tx_bd_prod);
3239
3240         /* get a tx_buf and first BD
3241          * tx_start_bd may be changed during SPLIT,
3242          * but first_bd will always stay first
3243          */
3244         tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3245         tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
3246         first_bd = tx_start_bd;
3247
3248         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
3249         SET_FLAG(tx_start_bd->general_data,
3250                  ETH_TX_START_BD_PARSE_NBDS,
3251                  0);
3252
3253         /* header nbd */
3254         SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
3255
3256         /* remember the first BD of the packet */
3257         tx_buf->first_bd = txdata->tx_bd_prod;
3258         tx_buf->skb = skb;
3259         tx_buf->flags = 0;
3260
3261         DP(NETIF_MSG_TX_QUEUED,
3262            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
3263            pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
3264
3265         if (vlan_tx_tag_present(skb)) {
3266                 tx_start_bd->vlan_or_ethertype =
3267                     cpu_to_le16(vlan_tx_tag_get(skb));
3268                 tx_start_bd->bd_flags.as_bitfield |=
3269                     (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3270         } else
3271                 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3272
3273         /* turn on parsing and get a BD */
3274         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3275
3276         if (xmit_type & XMIT_CSUM)
3277                 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
3278
3279         if (!CHIP_IS_E1x(bp)) {
3280                 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
3281                 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3282                 /* Set PBD in checksum offload case */
3283                 if (xmit_type & XMIT_CSUM)
3284                         hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3285                                                      &pbd_e2_parsing_data,
3286                                                      xmit_type);
3287                 if (IS_MF_SI(bp)) {
3288                         /*
3289                          * fill in the MAC addresses in the PBD - for local
3290                          * switching
3291                          */
3292                         bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
3293                                               &pbd_e2->src_mac_addr_mid,
3294                                               &pbd_e2->src_mac_addr_lo,
3295                                               eth->h_source);
3296                         bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
3297                                               &pbd_e2->dst_mac_addr_mid,
3298                                               &pbd_e2->dst_mac_addr_lo,
3299                                               eth->h_dest);
3300                 }
3301
3302                 SET_FLAG(pbd_e2_parsing_data,
3303                          ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
3304         } else {
3305                 u16 global_data = 0;
3306                 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
3307                 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3308                 /* Set PBD in checksum offload case */
3309                 if (xmit_type & XMIT_CSUM)
3310                         hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
3311
3312                 SET_FLAG(global_data,
3313                          ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
3314                 pbd_e1x->global_data |= cpu_to_le16(global_data);
3315         }
3316
3317         /* Setup the data pointer of the first BD of the packet */
3318         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3319         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3320         nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3321         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3322         pkt_size = tx_start_bd->nbytes;
3323
3324         DP(NETIF_MSG_TX_QUEUED,
3325            "first bd @%p  addr (%x:%x)  nbd %d  nbytes %d  flags %x  vlan %x\n",
3326            tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
3327            le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
3328            tx_start_bd->bd_flags.as_bitfield,
3329            le16_to_cpu(tx_start_bd->vlan_or_ethertype));
3330
3331         if (xmit_type & XMIT_GSO) {
3332
3333                 DP(NETIF_MSG_TX_QUEUED,
3334                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
3335                    skb->len, hlen, skb_headlen(skb),
3336                    skb_shinfo(skb)->gso_size);
3337
3338                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3339
3340                 if (unlikely(skb_headlen(skb) > hlen))
3341                         bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3342                                                  &tx_start_bd, hlen,
3343                                                  bd_prod, ++nbd);
3344                 if (!CHIP_IS_E1x(bp))
3345                         bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3346                                              xmit_type);
3347                 else
3348                         bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
3349         }
3350
3351         /* Set the PBD's parsing_data field if not zero
3352          * (for the chips newer than 57711).
3353          */
3354         if (pbd_e2_parsing_data)
3355                 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3356
3357         tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3358
3359         /* Handle fragmented skb */
3360         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3361                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3362
3363                 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3364                                            skb_frag_size(frag), DMA_TO_DEVICE);
3365                 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3366                         unsigned int pkts_compl = 0, bytes_compl = 0;
3367
3368                         DP(NETIF_MSG_TX_QUEUED,
3369                            "Unable to map page - dropping packet...\n");
3370
3371                         /* we need to unmap all buffers already mapped
3372                          * for this SKB;
3373                          * first_bd->nbd needs to be properly updated
3374                          * before the call to bnx2x_free_tx_pkt
3375                          */
3376                         first_bd->nbd = cpu_to_le16(nbd);
3377                         bnx2x_free_tx_pkt(bp, txdata,
3378                                           TX_BD(txdata->tx_pkt_prod),
3379                                           &pkts_compl, &bytes_compl);
3380                         return NETDEV_TX_OK;
3381                 }
3382
3383                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3384                 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3385                 if (total_pkt_bd == NULL)
3386                         total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3387
3388                 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3389                 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3390                 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
3391                 le16_add_cpu(&pkt_size, skb_frag_size(frag));
3392                 nbd++;
3393
3394                 DP(NETIF_MSG_TX_QUEUED,
3395                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
3396                    i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
3397                    le16_to_cpu(tx_data_bd->nbytes));
3398         }
3399
3400         DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
3401
3402         /* update with actual num BDs */
3403         first_bd->nbd = cpu_to_le16(nbd);
3404
3405         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3406
3407         /* now send a tx doorbell, counting the next BD
3408          * if the packet contains or ends with it
3409          */
3410         if (TX_BD_POFF(bd_prod) < nbd)
3411                 nbd++;
3412
3413         /* total_pkt_bytes should be set on the first data BD if
3414          * it's not an LSO packet and there is more than one
3415          * data BD. In this case pkt_size is limited by an MTU value.
3416          * However we prefer to set it for an LSO packet (while we don't
3417          * have to) in order to save some CPU cycles in the non-LSO
3418          * case, when we care much more about them.
3419          */
3420         if (total_pkt_bd != NULL)
3421                 total_pkt_bd->total_pkt_bytes = pkt_size;
3422
3423         if (pbd_e1x)
3424                 DP(NETIF_MSG_TX_QUEUED,
3425                    "PBD (E1X) @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
3426                    pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
3427                    pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
3428                    pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
3429                     le16_to_cpu(pbd_e1x->total_hlen_w));
3430         if (pbd_e2)
3431                 DP(NETIF_MSG_TX_QUEUED,
3432                    "PBD (E2) @%p  dst %x %x %x src %x %x %x parsing_data %x\n",
3433                    pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
3434                    pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
3435                    pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
3436                    pbd_e2->parsing_data);
3437         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
3438
3439         netdev_tx_sent_queue(txq, skb->len);
3440
3441         skb_tx_timestamp(skb);
3442
3443         txdata->tx_pkt_prod++;
3444         /*
3445          * Make sure that the BD data is updated before updating the producer
3446          * since FW might read the BD right after the producer is updated.
3447          * This is only applicable for weak-ordered memory model archs such
3448          * as IA-64. The following barrier is also mandatory since FW
3449          * assumes packets must have BDs.
3450          */
3451         wmb();
3452
3453         txdata->tx_db.data.prod += nbd;
3454         barrier();
3455
3456         DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
3457
3458         mmiowb();
3459
3460         txdata->tx_bd_prod += nbd;
3461
3462         if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
3463                 netif_tx_stop_queue(txq);
3464
3465                 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
3466                  * ordering of set_bit() in netif_tx_stop_queue() and read of
3467                  * fp->bd_tx_cons */
3468                 smp_mb();
3469
3470                 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3471                 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
3472                         netif_tx_wake_queue(txq);
3473         }
3474         txdata->tx_pkt++;
3475
3476         return NETDEV_TX_OK;
3477 }
3478
3479 /**
3480  * bnx2x_setup_tc - routine to configure net_device for multi tc
3481  *
3482  * @netdev: net device to configure
3483  * @tc: number of traffic classes to enable
3484  *
3485  * callback connected to the ndo_setup_tc function pointer
3486  */
3487 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
3488 {
3489         int cos, prio, count, offset;
3490         struct bnx2x *bp = netdev_priv(dev);
3491
3492         /* setup tc must be called under rtnl lock */
3493         ASSERT_RTNL();
3494
3495         /* no traffic classes requested. aborting */
3496         if (!num_tc) {
3497                 netdev_reset_tc(dev);
3498                 return 0;
3499         }
3500
3501         /* requested to support too many traffic classes */
3502         if (num_tc > bp->max_cos) {
3503                 BNX2X_ERR("support for too many traffic classes requested: %d. max supported is %d\n",
3504                           num_tc, bp->max_cos);
3505                 return -EINVAL;
3506         }
3507
3508         /* declare the number of supported traffic classes */
3509         if (netdev_set_num_tc(dev, num_tc)) {
3510                 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
3511                 return -EINVAL;
3512         }
3513
3514         /* configure priority to traffic class mapping */
3515         for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
3516                 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
3517                 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3518                    "mapping priority %d to tc %d\n",
3519                    prio, bp->prio_to_cos[prio]);
3520         }
3521
3522
3523         /* Use this configuration to differentiate tc0 from other COSes
3524            This can be used for ets or pfc, and saves the effort of setting
3525            up a multi-class queue disc or negotiating DCBX with a switch
3526         netdev_set_prio_tc_map(dev, 0, 0);
3527         DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
3528         for (prio = 1; prio < 16; prio++) {
3529                 netdev_set_prio_tc_map(dev, prio, 1);
3530                 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
3531         } */
3532
3533         /* configure traffic class to transmission queue mapping */
3534         for (cos = 0; cos < bp->max_cos; cos++) {
3535                 count = BNX2X_NUM_ETH_QUEUES(bp);
3536                 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
3537                 netdev_set_tc_queue(dev, cos, count, offset);
3538                 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3539                    "mapping tc %d to offset %d count %d\n",
3540                    cos, offset, count);
3541         }
3542
3543         return 0;
3544 }
3545
3546 /* called with rtnl_lock */
3547 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
3548 {
3549         struct sockaddr *addr = p;
3550         struct bnx2x *bp = netdev_priv(dev);
3551         int rc = 0;
3552
3553         if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
3554                 BNX2X_ERR("Requested MAC address is not valid\n");
3555                 return -EINVAL;
3556         }
3557
3558         if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
3559             !is_zero_ether_addr(addr->sa_data)) {
3560                 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
3561                 return -EINVAL;
3562         }
3563
3564         if (netif_running(dev))  {
3565                 rc = bnx2x_set_eth_mac(bp, false);
3566                 if (rc)
3567                         return rc;
3568         }
3569
3570         dev->addr_assign_type &= ~NET_ADDR_RANDOM;
3571         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3572
3573         if (netif_running(dev))
3574                 rc = bnx2x_set_eth_mac(bp, true);
3575
3576         return rc;
3577 }
3578
3579 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
3580 {
3581         union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
3582         struct bnx2x_fastpath *fp = &bp->fp[fp_index];
3583         u8 cos;
3584
3585         /* Common */
3586
3587         if (IS_FCOE_IDX(fp_index)) {
3588                 memset(sb, 0, sizeof(union host_hc_status_block));
3589                 fp->status_blk_mapping = 0;
3590         } else {
3591                 /* status blocks */
3592                 if (!CHIP_IS_E1x(bp))
3593                         BNX2X_PCI_FREE(sb->e2_sb,
3594                                        bnx2x_fp(bp, fp_index,
3595                                                 status_blk_mapping),
3596                                        sizeof(struct host_hc_status_block_e2));
3597                 else
3598                         BNX2X_PCI_FREE(sb->e1x_sb,
3599                                        bnx2x_fp(bp, fp_index,
3600                                                 status_blk_mapping),
3601                                        sizeof(struct host_hc_status_block_e1x));
3602         }
3603
3604         /* Rx */
3605         if (!skip_rx_queue(bp, fp_index)) {
3606                 bnx2x_free_rx_bds(fp);
3607
3608                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3609                 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
3610                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
3611                                bnx2x_fp(bp, fp_index, rx_desc_mapping),
3612                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
3613
3614                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
3615                                bnx2x_fp(bp, fp_index, rx_comp_mapping),
3616                                sizeof(struct eth_fast_path_rx_cqe) *
3617                                NUM_RCQ_BD);
3618
3619                 /* SGE ring */
3620                 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
3621                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
3622                                bnx2x_fp(bp, fp_index, rx_sge_mapping),
3623                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3624         }
3625
3626         /* Tx */
3627         if (!skip_tx_queue(bp, fp_index)) {
3628                 /* fastpath tx rings: tx_buf tx_desc */
3629                 for_each_cos_in_tx_queue(fp, cos) {
3630                         struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
3631
3632                         DP(NETIF_MSG_IFDOWN,
3633                            "freeing tx memory of fp %d cos %d cid %d\n",
3634                            fp_index, cos, txdata->cid);
3635
3636                         BNX2X_FREE(txdata->tx_buf_ring);
3637                         BNX2X_PCI_FREE(txdata->tx_desc_ring,
3638                                 txdata->tx_desc_mapping,
3639                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3640                 }
3641         }
3642         /* end of fastpath */
3643 }
3644
3645 void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
3646 {
3647         int i;
3648         for_each_cnic_queue(bp, i)
3649                 bnx2x_free_fp_mem_at(bp, i);
3650 }
3651
3652 void bnx2x_free_fp_mem(struct bnx2x *bp)
3653 {
3654         int i;
3655         for_each_eth_queue(bp, i)
3656                 bnx2x_free_fp_mem_at(bp, i);
3657 }
3658
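/* Cache pointers to the status block index_values and running_index arrays
 * in the fastpath, using the E2 or E1x status block layout as appropriate.
 */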
3659 static void set_sb_shortcuts(struct bnx2x *bp, int index)
3660 {
3661         union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
3662         if (!CHIP_IS_E1x(bp)) {
3663                 bnx2x_fp(bp, index, sb_index_values) =
3664                         (__le16 *)status_blk.e2_sb->sb.index_values;
3665                 bnx2x_fp(bp, index, sb_running_index) =
3666                         (__le16 *)status_blk.e2_sb->sb.running_index;
3667         } else {
3668                 bnx2x_fp(bp, index, sb_index_values) =
3669                         (__le16 *)status_blk.e1x_sb->sb.index_values;
3670                 bnx2x_fp(bp, index, sb_running_index) =
3671                         (__le16 *)status_blk.e1x_sb->sb.running_index;
3672         }
3673 }
3674
3675 /* Returns the number of actually allocated BDs */
3676 static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
3677                               int rx_ring_size)
3678 {
3679         struct bnx2x *bp = fp->bp;
3680         u16 ring_prod, cqe_ring_prod;
3681         int i, failure_cnt = 0;
3682
3683         fp->rx_comp_cons = 0;
3684         cqe_ring_prod = ring_prod = 0;
3685
3686         /* This routine is called only during init, so
3687          * fp->eth_q_stats.rx_skb_alloc_failed = 0
3688          */
3689         for (i = 0; i < rx_ring_size; i++) {
3690                 if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
3691                         failure_cnt++;
3692                         continue;
3693                 }
3694                 ring_prod = NEXT_RX_IDX(ring_prod);
3695                 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
3696                 WARN_ON(ring_prod <= (i - failure_cnt));
3697         }
3698
3699         if (failure_cnt)
3700                 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
3701                           i - failure_cnt, fp->index);
3702
3703         fp->rx_bd_prod = ring_prod;
3704         /* Limit the CQE producer by the CQE ring size */
3705         fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
3706                                cqe_ring_prod);
3707         fp->rx_pkt = fp->rx_calls = 0;
3708
3709         bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
3710
3711         return i - failure_cnt;
3712 }
3713
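/* Chain the RCQ pages: the last CQE of each page points to the next page,
 * with the last page wrapping back to the first one.
 */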
3714 static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
3715 {
3716         int i;
3717
3718         for (i = 1; i <= NUM_RCQ_RINGS; i++) {
3719                 struct eth_rx_cqe_next_page *nextpg;
3720
3721                 nextpg = (struct eth_rx_cqe_next_page *)
3722                         &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
3723                 nextpg->addr_hi =
3724                         cpu_to_le32(U64_HI(fp->rx_comp_mapping +
3725                                    BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
3726                 nextpg->addr_lo =
3727                         cpu_to_le32(U64_LO(fp->rx_comp_mapping +
3728                                    BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
3729         }
3730 }
3731
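/**
 * bnx2x_alloc_fp_mem_at - allocate fastpath memory for a single queue
 *
 * @bp:         driver handle
 * @index:      fastpath index
 *
 * Allocates the status block and the Tx/Rx rings of fastpath @index.
 * If no Rx ring size was configured, a default is derived and clamped
 * to the minimum required by the FW.
 */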
3732 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3733 {
3734         union host_hc_status_block *sb;
3735         struct bnx2x_fastpath *fp = &bp->fp[index];
3736         int ring_size = 0;
3737         u8 cos;
3738         int rx_ring_size = 0;
3739
3740         if (!bp->rx_ring_size &&
3741             (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
3742                 rx_ring_size = MIN_RX_SIZE_NONTPA;
3743                 bp->rx_ring_size = rx_ring_size;
3744         } else if (!bp->rx_ring_size) {
3745                 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
3746
3747                 if (CHIP_IS_E3(bp)) {
3748                         u32 cfg = SHMEM_RD(bp,
3749                                            dev_info.port_hw_config[BP_PORT(bp)].
3750                                            default_cfg);
3751
3752                         /* Decrease ring size for 1G functions */
3753                         if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
3754                             PORT_HW_CFG_NET_SERDES_IF_SGMII)
3755                                 rx_ring_size /= 10;
3756                 }
3757
3758                 /* allocate at least the number of buffers required by FW */
3759                 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
3760                                      MIN_RX_SIZE_TPA, rx_ring_size);
3761
3762                 bp->rx_ring_size = rx_ring_size;
3763         } else /* if rx_ring_size specified - use it */
3764                 rx_ring_size = bp->rx_ring_size;
3765
3766         /* Common */
3767         sb = &bnx2x_fp(bp, index, status_blk);
3768
3769         if (!IS_FCOE_IDX(index)) {
3770                 /* status blocks */
3771                 if (!CHIP_IS_E1x(bp))
3772                         BNX2X_PCI_ALLOC(sb->e2_sb,
3773                                 &bnx2x_fp(bp, index, status_blk_mapping),
3774                                 sizeof(struct host_hc_status_block_e2));
3775                 else
3776                         BNX2X_PCI_ALLOC(sb->e1x_sb,
3777                                 &bnx2x_fp(bp, index, status_blk_mapping),
3778                             sizeof(struct host_hc_status_block_e1x));
3779         }
3780
3781         /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
3782          * set shortcuts for it.
3783          */
3784         if (!IS_FCOE_IDX(index))
3785                 set_sb_shortcuts(bp, index);
3786
3787         /* Tx */
3788         if (!skip_tx_queue(bp, index)) {
3789                 /* fastpath tx rings: tx_buf tx_desc */
3790                 for_each_cos_in_tx_queue(fp, cos) {
3791                         struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
3792
3793                         DP(NETIF_MSG_IFUP,
3794                            "allocating tx memory of fp %d cos %d\n",
3795                            index, cos);
3796
3797                         BNX2X_ALLOC(txdata->tx_buf_ring,
3798                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
3799                         BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
3800                                 &txdata->tx_desc_mapping,
3801                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3802                 }
3803         }
3804
3805         /* Rx */
3806         if (!skip_rx_queue(bp, index)) {
3807                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3808                 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
3809                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
3810                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
3811                                 &bnx2x_fp(bp, index, rx_desc_mapping),
3812                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3813
3814                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
3815                                 &bnx2x_fp(bp, index, rx_comp_mapping),
3816                                 sizeof(struct eth_fast_path_rx_cqe) *
3817                                 NUM_RCQ_BD);
3818
3819                 /* SGE ring */
3820                 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
3821                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
3822                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
3823                                 &bnx2x_fp(bp, index, rx_sge_mapping),
3824                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3825                 /* RX BD ring */
3826                 bnx2x_set_next_page_rx_bd(fp);
3827
3828                 /* CQ ring */
3829                 bnx2x_set_next_page_rx_cq(fp);
3830
3831                 /* BDs */
3832                 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
3833                 if (ring_size < rx_ring_size)
3834                         goto alloc_mem_err;
3835         }
3836
3837         return 0;
3838
3839 /* handles low memory cases */
3840 alloc_mem_err:
3841         BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
3842                                                 index, ring_size);
3843         /* FW will drop all packets if the queue is not big enough;
3844          * in these cases we disable the queue.
3845          * Min size is different for OOO, TPA and non-TPA queues
3846          */
3847         if (ring_size < (fp->disable_tpa ?
3848                                 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
3849                         /* release memory allocated for this queue */
3850                         bnx2x_free_fp_mem_at(bp, index);
3851                         return -ENOMEM;
3852         }
3853         return 0;
3854 }
3855
3856 int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
3857 {
3858         if (!NO_FCOE(bp))
3859                 /* FCoE */
3860                 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
3861                         /* we will fail the load process instead of marking
3862                          * NO_FCOE_FLAG
3863                          */
3864                         return -ENOMEM;
3865
3866         return 0;
3867 }
3868
3869 int bnx2x_alloc_fp_mem(struct bnx2x *bp)
3870 {
3871         int i;
3872
3873         /* 1. Allocate FP for leading - fatal if error
3874          * 2. Allocate RSS - fix number of queues if error
3875          */
3876
3877         /* leading */
3878         if (bnx2x_alloc_fp_mem_at(bp, 0))
3879                 return -ENOMEM;
3880
3881         /* RSS */
3882         for_each_nondefault_eth_queue(bp, i)
3883                 if (bnx2x_alloc_fp_mem_at(bp, i))
3884                         break;
3885
3886         /* handle memory failures */
3887         if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
3888                 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
3889
3890                 WARN_ON(delta < 0);
3891                 bnx2x_shrink_eth_fp(bp, delta);
3892                 if (CNIC_SUPPORT(bp))
3893                         /* move non-eth FPs next to the last eth FP;
3894                          * must be done in that order:
3895                          * FCOE_IDX < FWD_IDX < OOO_IDX
3896                          */
3897
3898                         /* move FCoE fp even NO_FCOE_FLAG is on */
3899                         bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
3900                 bp->num_ethernet_queues -= delta;
3901                 bp->num_queues = bp->num_ethernet_queues +
3902                                  bp->num_cnic_queues;
3903                 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
3904                           bp->num_queues + delta, bp->num_queues);
3905         }
3906
3907         return 0;
3908 }
3909
3910 void bnx2x_free_mem_bp(struct bnx2x *bp)
3911 {
3912         kfree(bp->fp->tpa_info);
3913         kfree(bp->fp);
3914         kfree(bp->sp_objs);
3915         kfree(bp->fp_stats);
3916         kfree(bp->bnx2x_txq);
3917         kfree(bp->msix_table);
3918         kfree(bp->ilt);
3919 }
3920
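/* Allocate the per-device bookkeeping arrays: the fastpath array (with
 * per-queue TPA info), sp_objs, fp_stats, the Tx queue array, the MSI-X
 * table and the ILT.
 */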
3921 int bnx2x_alloc_mem_bp(struct bnx2x *bp)
3922 {
3923         struct bnx2x_fastpath *fp;
3924         struct msix_entry *tbl;
3925         struct bnx2x_ilt *ilt;
3926         int msix_table_size = 0;
3927         int fp_array_size, txq_array_size;
3928         int i;
3929
3930         /*
3931          * The biggest MSI-X table we might need is the maximum number of fast
3932          * path IGU SBs plus the default SB (for PF).
3933          */
3934         msix_table_size = bp->igu_sb_cnt + 1;
3935
3936         /* fp array: RSS plus CNIC related L2 queues */
3937         fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
3938         BNX2X_DEV_INFO("fp_array_size %d", fp_array_size);
3939
3940         fp = kcalloc(fp_array_size, sizeof(*fp), GFP_KERNEL);
3941         if (!fp)
3942                 goto alloc_err;
3943         for (i = 0; i < fp_array_size; i++) {
3944                 fp[i].tpa_info =
3945                         kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
3946                                 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
3947                 if (!(fp[i].tpa_info))
3948                         goto alloc_err;
3949         }
3950
3951         bp->fp = fp;
3952
3953         /* allocate sp objs */
3954         bp->sp_objs = kcalloc(fp_array_size, sizeof(struct bnx2x_sp_objs),
3955                               GFP_KERNEL);
3956         if (!bp->sp_objs)
3957                 goto alloc_err;
3958
3959         /* allocate fp_stats */
3960         bp->fp_stats = kcalloc(fp_array_size, sizeof(struct bnx2x_fp_stats),
3961                                GFP_KERNEL);
3962         if (!bp->fp_stats)
3963                 goto alloc_err;
3964
3965         /* Allocate memory for the transmission queues array */
3966         txq_array_size =
3967                 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
3968         BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
3969
3970         bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
3971                                 GFP_KERNEL);
3972         if (!bp->bnx2x_txq)
3973                 goto alloc_err;
3974
3975         /* msix table */
3976         tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
3977         if (!tbl)
3978                 goto alloc_err;
3979         bp->msix_table = tbl;
3980
3981         /* ilt */
3982         ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
3983         if (!ilt)
3984                 goto alloc_err;
3985         bp->ilt = ilt;
3986
3987         return 0;
3988 alloc_err:
3989         bnx2x_free_mem_bp(bp);
3990         return -ENOMEM;
3991
3992 }
3993
3994 int bnx2x_reload_if_running(struct net_device *dev)
3995 {
3996         struct bnx2x *bp = netdev_priv(dev);
3997
3998         if (unlikely(!netif_running(dev)))
3999                 return 0;
4000
4001         bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
4002         return bnx2x_nic_load(bp, LOAD_NORMAL);
4003 }
4004
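/* Return the index of the PHY currently in use (INT_PHY, EXT_PHY1 or
 * EXT_PHY2), based on the link status when the link is up or on the
 * configured PHY selection otherwise.
 */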
4005 int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4006 {
4007         u32 sel_phy_idx = 0;
4008         if (bp->link_params.num_phys <= 1)
4009                 return INT_PHY;
4010
4011         if (bp->link_vars.link_up) {
4012                 sel_phy_idx = EXT_PHY1;
4013                 /* In case link is SERDES, check if the EXT_PHY2 is the one */
4014                 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4015                     (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4016                         sel_phy_idx = EXT_PHY2;
4017         } else {
4018
4019                 switch (bnx2x_phy_selection(&bp->link_params)) {
4020                 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4021                 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4022                 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4023                        sel_phy_idx = EXT_PHY1;
4024                        break;
4025                 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4026                 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4027                        sel_phy_idx = EXT_PHY2;
4028                        break;
4029                 }
4030         }
4031
4032         return sel_phy_idx;
4033 }
4034
4035 int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4036 {
4037         u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4038         /*
4039          * The selected active PHY is always after swapping (in case PHY
4040          * swapping is enabled). So when swapping is enabled, we need to reverse
4041          * the configuration
4042          */
4043
4044         if (bp->link_params.multi_phy_config &
4045             PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4046                 if (sel_phy_idx == EXT_PHY1)
4047                         sel_phy_idx = EXT_PHY2;
4048                 else if (sel_phy_idx == EXT_PHY2)
4049                         sel_phy_idx = EXT_PHY1;
4050         }
4051         return LINK_CONFIG_IDX(sel_phy_idx);
4052 }
4053
4054 #ifdef NETDEV_FCOE_WWNN
4055 int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4056 {
4057         struct bnx2x *bp = netdev_priv(dev);
4058         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4059
4060         switch (type) {
4061         case NETDEV_FCOE_WWNN:
4062                 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4063                                 cp->fcoe_wwn_node_name_lo);
4064                 break;
4065         case NETDEV_FCOE_WWPN:
4066                 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4067                                 cp->fcoe_wwn_port_name_lo);
4068                 break;
4069         default:
4070                 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
4071                 return -EINVAL;
4072         }
4073
4074         return 0;
4075 }
4076 #endif
4077
4078 /* called with rtnl_lock */
4079 int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4080 {
4081         struct bnx2x *bp = netdev_priv(dev);
4082
4083         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4084                 BNX2X_ERR("Can't change MTU during parity recovery\n");
4085                 return -EAGAIN;
4086         }
4087
4088         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
4089             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
4090                 BNX2X_ERR("Can't support requested MTU size\n");
4091                 return -EINVAL;
4092         }
4093
4094         /* This does not race with packet allocation
4095          * because the actual alloc size is
4096          * only updated as part of load
4097          */
4098         dev->mtu = new_mtu;
4099
4100         return bnx2x_reload_if_running(dev);
4101 }
4102
4103 netdev_features_t bnx2x_fix_features(struct net_device *dev,
4104                                      netdev_features_t features)
4105 {
4106         struct bnx2x *bp = netdev_priv(dev);
4107
4108         /* TPA requires Rx CSUM offloading */
4109         if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
4110                 features &= ~NETIF_F_LRO;
4111                 features &= ~NETIF_F_GRO;
4112         }
4113
4114         return features;
4115 }
4116
4117 int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
4118 {
4119         struct bnx2x *bp = netdev_priv(dev);
4120         u32 flags = bp->flags;
4121         bool bnx2x_reload = false;
4122
4123         if (features & NETIF_F_LRO)
4124                 flags |= TPA_ENABLE_FLAG;
4125         else
4126                 flags &= ~TPA_ENABLE_FLAG;
4127
4128         if (features & NETIF_F_GRO)
4129                 flags |= GRO_ENABLE_FLAG;
4130         else
4131                 flags &= ~GRO_ENABLE_FLAG;
4132
4133         if (features & NETIF_F_LOOPBACK) {
4134                 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4135                         bp->link_params.loopback_mode = LOOPBACK_BMAC;
4136                         bnx2x_reload = true;
4137                 }
4138         } else {
4139                 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4140                         bp->link_params.loopback_mode = LOOPBACK_NONE;
4141                         bnx2x_reload = true;
4142                 }
4143         }
4144
4145         if (flags ^ bp->flags) {
4146                 bp->flags = flags;
4147                 bnx2x_reload = true;
4148         }
4149
4150         if (bnx2x_reload) {
4151                 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
4152                         return bnx2x_reload_if_running(dev);
4153                 /* else: bnx2x_nic_load() will be called at end of recovery */
4154         }
4155
4156         return 0;
4157 }
4158
4159 void bnx2x_tx_timeout(struct net_device *dev)
4160 {
4161         struct bnx2x *bp = netdev_priv(dev);
4162
4163 #ifdef BNX2X_STOP_ON_ERROR
4164         if (!bp->panic)
4165                 bnx2x_panic();
4166 #endif
4167
4168         smp_mb__before_clear_bit();
4169         set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
4170         smp_mb__after_clear_bit();
4171
4172         /* This allows the netif to be shut down gracefully before resetting */
4173         schedule_delayed_work(&bp->sp_rtnl_task, 0);
4174 }
4175
4176 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
4177 {
4178         struct net_device *dev = pci_get_drvdata(pdev);
4179         struct bnx2x *bp;
4180
4181         if (!dev) {
4182                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4183                 return -ENODEV;
4184         }
4185         bp = netdev_priv(dev);
4186
4187         rtnl_lock();
4188
4189         pci_save_state(pdev);
4190
4191         if (!netif_running(dev)) {
4192                 rtnl_unlock();
4193                 return 0;
4194         }
4195
4196         netif_device_detach(dev);
4197
4198         bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
4199
4200         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
4201
4202         rtnl_unlock();
4203
4204         return 0;
4205 }
4206
4207 int bnx2x_resume(struct pci_dev *pdev)
4208 {
4209         struct net_device *dev = pci_get_drvdata(pdev);
4210         struct bnx2x *bp;
4211         int rc;
4212
4213         if (!dev) {
4214                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4215                 return -ENODEV;
4216         }
4217         bp = netdev_priv(dev);
4218
4219         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4220                 BNX2X_ERR("Handling parity error recovery. Try again later\n");
4221                 return -EAGAIN;
4222         }
4223
4224         rtnl_lock();
4225
4226         pci_restore_state(pdev);
4227
4228         if (!netif_running(dev)) {
4229                 rtnl_unlock();
4230                 return 0;
4231         }
4232
4233         bnx2x_set_power_state(bp, PCI_D0);
4234         netif_device_attach(dev);
4235
4236         rc = bnx2x_nic_load(bp, LOAD_OPEN);
4237
4238         rtnl_unlock();
4239
4240         return rc;
4241 }
4242
4243
4244 void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
4245                               u32 cid)
4246 {
4247         /* ustorm cxt validation */
4248         cxt->ustorm_ag_context.cdu_usage =
4249                 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4250                         CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
4251         /* xcontext validation */
4252         cxt->xstorm_ag_context.cdu_reserved =
4253                 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4254                         CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
4255 }
4256
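/* Write the host coalescing timeout (in ticks) for the given status block
 * index into CSTORM internal memory.
 */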
4257 static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
4258                                     u8 fw_sb_id, u8 sb_index,
4259                                     u8 ticks)
4260 {
4261
4262         u32 addr = BAR_CSTRORM_INTMEM +
4263                    CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
4264         REG_WR8(bp, addr, ticks);
4265         DP(NETIF_MSG_IFUP,
4266            "port %x fw_sb_id %d sb_index %d ticks %d\n",
4267            port, fw_sb_id, sb_index, ticks);
4268 }
4269
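/* Set or clear the HC_INDEX_DATA_HC_ENABLED flag for the given status block
 * index in CSTORM internal memory.
 */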
4270 static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
4271                                     u16 fw_sb_id, u8 sb_index,
4272                                     u8 disable)
4273 {
4274         u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
4275         u32 addr = BAR_CSTRORM_INTMEM +
4276                    CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
4277         u16 flags = REG_RD16(bp, addr);
4278         /* clear and set */
4279         flags &= ~HC_INDEX_DATA_HC_ENABLED;
4280         flags |= enable_flag;
4281         REG_WR16(bp, addr, flags);
4282         DP(NETIF_MSG_IFUP,
4283            "port %x fw_sb_id %d sb_index %d disable %d\n",
4284            port, fw_sb_id, sb_index, disable);
4285 }
4286
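/**
 * bnx2x_update_coalesce_sb_index - program coalescing for one SB index
 *
 * @bp:         driver handle
 * @fw_sb_id:   FW status block id
 * @sb_index:   index within the status block
 * @disable:    force host coalescing off for this index
 * @usec:       coalescing timeout in microseconds
 *
 * Converts @usec to BNX2X_BTR ticks and programs the timeout; a zero
 * @usec disables coalescing for the index as well.
 */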
4287 void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
4288                                     u8 sb_index, u8 disable, u16 usec)
4289 {
4290         int port = BP_PORT(bp);
4291         u8 ticks = usec / BNX2X_BTR;
4292
4293         storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4294
4295         disable = disable ? 1 : (usec ? 0 : 1);
4296         storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4297 }