1 /* bnx2x_cmn.c: QLogic Everest network driver.
2  *
3  * Copyright (c) 2007-2013 Broadcom Corporation
4  * Copyright (c) 2014 QLogic Corporation
5  * All rights reserved
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation.
10  *
11  * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
12  * Written by: Eliezer Tamir
13  * Based on code from Michael Chan's bnx2 driver
14  * UDP CSUM errata workaround by Arik Gendelman
15  * Slowpath and fastpath rework by Vladislav Zolotarov
16  * Statistics and Link management by Yitchak Gertner
17  *
18  */
19
20 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21
22 #include <linux/etherdevice.h>
23 #include <linux/if_vlan.h>
24 #include <linux/interrupt.h>
25 #include <linux/ip.h>
26 #include <linux/crash_dump.h>
27 #include <net/tcp.h>
28 #include <net/ipv6.h>
29 #include <net/ip6_checksum.h>
30 #include <net/busy_poll.h>
31 #include <linux/prefetch.h>
32 #include "bnx2x_cmn.h"
33 #include "bnx2x_init.h"
34 #include "bnx2x_sp.h"
35
36 static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
37 static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
38 static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
39 static int bnx2x_poll(struct napi_struct *napi, int budget);
40
41 static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
42 {
43         int i;
44
45         /* Add NAPI objects */
46         for_each_rx_queue_cnic(bp, i) {
47                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
48                                bnx2x_poll, NAPI_POLL_WEIGHT);
49         }
50 }
51
52 static void bnx2x_add_all_napi(struct bnx2x *bp)
53 {
54         int i;
55
56         /* Add NAPI objects */
57         for_each_eth_queue(bp, i) {
58                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
59                                bnx2x_poll, NAPI_POLL_WEIGHT);
60         }
61 }
62
63 static int bnx2x_calc_num_queues(struct bnx2x *bp)
64 {
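        /* A non-zero bnx2x_num_queues is used as-is; zero falls back to the
         * kernel's default RSS queue count (GNU "a ?: b" form).
         */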
65         int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();
66
67         /* Reduce memory usage in kdump environment by using only one queue */
68         if (is_kdump_kernel())
69                 nq = 1;
70
71         nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
72         return nq;
73 }
74
75 /**
76  * bnx2x_move_fp - move content of the fastpath structure.
77  *
78  * @bp:         driver handle
79  * @from:       source FP index
80  * @to:         destination FP index
81  *
82  * Makes sure the contents of the bp->fp[to].napi are kept
83  * intact. This is done by first copying the napi struct from
84  * the target to the source, and then memcpy'ing the entire
85  * source onto the target. Txdata pointers and related
86  * content are updated as well.
87  */
88 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
89 {
90         struct bnx2x_fastpath *from_fp = &bp->fp[from];
91         struct bnx2x_fastpath *to_fp = &bp->fp[to];
92         struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
93         struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
94         struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
95         struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
96         int old_max_eth_txqs, new_max_eth_txqs;
97         int old_txdata_index = 0, new_txdata_index = 0;
98         struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;
99
100         /* Copy the NAPI object as it has been already initialized */
101         from_fp->napi = to_fp->napi;
102
103         /* Move bnx2x_fastpath contents */
104         memcpy(to_fp, from_fp, sizeof(*to_fp));
105         to_fp->index = to;
106
107         /* Retain the tpa_info of the original `to' version as we don't want
108          * 2 FPs to contain the same tpa_info pointer.
109          */
110         to_fp->tpa_info = old_tpa_info;
111
112         /* move sp_objs contents as well, as their indices match fp ones */
113         memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
114
115         /* move fp_stats contents as well, as their indices match fp ones */
116         memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
117
118         /* Update txdata pointers in fp and move txdata content accordingly:
119          * Each fp consumes 'max_cos' txdata structures, so the index should be
120          * decremented by max_cos x delta.
121          */
122
123         old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
124         new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
125                                 (bp)->max_cos;
126         if (from == FCOE_IDX(bp)) {
127                 old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
128                 new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
129         }
130
131         memcpy(&bp->bnx2x_txq[new_txdata_index],
132                &bp->bnx2x_txq[old_txdata_index],
133                sizeof(struct bnx2x_fp_txdata));
134         to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
135 }
136
137 /**
138  * bnx2x_fill_fw_str - Fill buffer with FW version string.
139  *
140  * @bp:        driver handle
141  * @buf:       character buffer to fill with the fw name
142  * @buf_len:   length of the above buffer
143  *
144  */
145 void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
146 {
147         if (IS_PF(bp)) {
148                 u8 phy_fw_ver[PHY_FW_VER_LEN];
149
150                 phy_fw_ver[0] = '\0';
151                 bnx2x_get_ext_phy_fw_version(&bp->link_params,
152                                              phy_fw_ver, PHY_FW_VER_LEN);
153                 strlcpy(buf, bp->fw_ver, buf_len);
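                /* bp->common.bc_ver packs the bootcode version one byte per
                 * field, e.g. 0x070d04 is reported as "bc 7.13.4".
                 */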
154                 snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
155                          "bc %d.%d.%d%s%s",
156                          (bp->common.bc_ver & 0xff0000) >> 16,
157                          (bp->common.bc_ver & 0xff00) >> 8,
158                          (bp->common.bc_ver & 0xff),
159                          ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
160         } else {
161                 bnx2x_vf_fill_fw_str(bp, buf, buf_len);
162         }
163 }
164
165 /**
166  * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
167  *
168  * @bp: driver handle
169  * @delta:      number of eth queues which were not allocated
170  */
171 static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
172 {
173         int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
174
175         /* Queue pointer cannot be re-set on a per-fp basis, as moving the pointer
176          * backward along the array could cause memory to be overwritten
177          */
178         for (cos = 1; cos < bp->max_cos; cos++) {
179                 for (i = 0; i < old_eth_num - delta; i++) {
180                         struct bnx2x_fastpath *fp = &bp->fp[i];
181                         int new_idx = cos * (old_eth_num - delta) + i;
182
183                         memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
184                                sizeof(struct bnx2x_fp_txdata));
185                         fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
186                 }
187         }
188 }
189
190 int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
191
192 /* free skb in the packet ring at pos idx
193  * return idx of last bd freed
194  */
195 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
196                              u16 idx, unsigned int *pkts_compl,
197                              unsigned int *bytes_compl)
198 {
199         struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
200         struct eth_tx_start_bd *tx_start_bd;
201         struct eth_tx_bd *tx_data_bd;
202         struct sk_buff *skb = tx_buf->skb;
203         u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
204         int nbd;
205         u16 split_bd_len = 0;
206
207         /* prefetch skb end pointer to speed up dev_kfree_skb() */
208         prefetch(&skb->end);
209
210         DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d  buff @(%p)->skb %p\n",
211            txdata->txq_index, idx, tx_buf, skb);
212
213         tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
214
215         nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
216 #ifdef BNX2X_STOP_ON_ERROR
217         if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
218                 BNX2X_ERR("BAD nbd!\n");
219                 bnx2x_panic();
220         }
221 #endif
222         new_cons = nbd + tx_buf->first_bd;
223
224         /* Get the next bd */
225         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
226
227         /* Skip a parse bd... */
228         --nbd;
229         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
230
231         if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
232                 /* Skip second parse bd... */
233                 --nbd;
234                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
235         }
236
237         /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
238         if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
239                 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
240                 split_bd_len = BD_UNMAP_LEN(tx_data_bd);
241                 --nbd;
242                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
243         }
244
245         /* unmap first bd */
246         dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
247                          BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
248                          DMA_TO_DEVICE);
249
250         /* now free frags */
251         while (nbd > 0) {
252
253                 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
254                 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
255                                BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
256                 if (--nbd)
257                         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
258         }
259
260         /* release skb */
261         WARN_ON(!skb);
262         if (likely(skb)) {
263                 (*pkts_compl)++;
264                 (*bytes_compl) += skb->len;
265                 dev_kfree_skb_any(skb);
266         }
267
268         tx_buf->first_bd = 0;
269         tx_buf->skb = NULL;
270
271         return new_cons;
272 }
273
274 int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
275 {
276         struct netdev_queue *txq;
277         u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
278         unsigned int pkts_compl = 0, bytes_compl = 0;
279
280 #ifdef BNX2X_STOP_ON_ERROR
281         if (unlikely(bp->panic))
282                 return -1;
283 #endif
284
285         txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
286         hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
287         sw_cons = txdata->tx_pkt_cons;
288
289         while (sw_cons != hw_cons) {
290                 u16 pkt_cons;
291
292                 pkt_cons = TX_BD(sw_cons);
293
294                 DP(NETIF_MSG_TX_DONE,
295                    "queue[%d]: hw_cons %u  sw_cons %u  pkt_cons %u\n",
296                    txdata->txq_index, hw_cons, sw_cons, pkt_cons);
297
298                 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
299                                             &pkts_compl, &bytes_compl);
300
301                 sw_cons++;
302         }
303
304         netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
305
306         txdata->tx_pkt_cons = sw_cons;
307         txdata->tx_bd_cons = bd_cons;
308
309         /* Need to make the tx_bd_cons update visible to start_xmit()
310          * before checking for netif_tx_queue_stopped().  Without the
311          * memory barrier, there is a small possibility that
312          * start_xmit() will miss it and cause the queue to be stopped
313          * forever.
314          * On the other hand we need an rmb() here to ensure the proper
315          * ordering of bit testing in the following
316          * netif_tx_queue_stopped(txq) call.
317          */
318         smp_mb();
319
320         if (unlikely(netif_tx_queue_stopped(txq))) {
321                 /* Taking tx_lock() is needed to prevent re-enabling the queue
322                  * while it's empty. This could happen if rx_action() gets
323                  * suspended in bnx2x_tx_int() after checking the condition
324                  * but before netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
325                  *
326                  * stops the queue->sees fresh tx_bd_cons->releases the queue->
327                  * sends some packets consuming the whole queue again->
328                  * stops the queue
329                  */
330
331                 __netif_tx_lock(txq, smp_processor_id());
332
333                 if ((netif_tx_queue_stopped(txq)) &&
334                     (bp->state == BNX2X_STATE_OPEN) &&
335                     (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
336                         netif_tx_wake_queue(txq);
337
338                 __netif_tx_unlock(txq);
339         }
340         return 0;
341 }
342
343 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
344                                              u16 idx)
345 {
346         u16 last_max = fp->last_max_sge;
347
348         if (SUB_S16(idx, last_max) > 0)
349                 fp->last_max_sge = idx;
350 }
351
352 static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
353                                          u16 sge_len,
354                                          struct eth_end_agg_rx_cqe *cqe)
355 {
356         struct bnx2x *bp = fp->bp;
357         u16 last_max, last_elem, first_elem;
358         u16 delta = 0;
359         u16 i;
360
361         if (!sge_len)
362                 return;
363
364         /* First mark all used pages */
365         for (i = 0; i < sge_len; i++)
366                 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
367                         RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
368
369         DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
370            sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
371
372         /* Here we assume that the last SGE index is the biggest */
373         prefetch((void *)(fp->sge_mask));
374         bnx2x_update_last_max_sge(fp,
375                 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
376
377         last_max = RX_SGE(fp->last_max_sge);
378         last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
379         first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
380
381         /* If ring is not full */
382         if (last_elem + 1 != first_elem)
383                 last_elem++;
384
385         /* Now update the prod */
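        /* Advance over every 64-bit mask element whose SGEs have all been
         * consumed (the element reads as zero): re-arm it to all ones and
         * credit rx_sge_prod with BIT_VEC64_ELEM_SZ entries, stopping at the
         * first element that still has unconsumed SGEs.
         */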
386         for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
387                 if (likely(fp->sge_mask[i]))
388                         break;
389
390                 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
391                 delta += BIT_VEC64_ELEM_SZ;
392         }
393
394         if (delta > 0) {
395                 fp->rx_sge_prod += delta;
396                 /* clear page-end entries */
397                 bnx2x_clear_sge_mask_next_elems(fp);
398         }
399
400         DP(NETIF_MSG_RX_STATUS,
401            "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
402            fp->last_max_sge, fp->rx_sge_prod);
403 }
404
405 /* Get Toeplitz hash value in the skb using the value from the
406  * CQE (calculated by HW).
407  */
408 static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
409                             const struct eth_fast_path_rx_cqe *cqe,
410                             enum pkt_hash_types *rxhash_type)
411 {
412         /* Get Toeplitz hash from CQE */
413         if ((bp->dev->features & NETIF_F_RXHASH) &&
414             (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
415                 enum eth_rss_hash_type htype;
416
417                 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
418                 *rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
419                                 (htype == TCP_IPV6_HASH_TYPE)) ?
420                                PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;
421
422                 return le32_to_cpu(cqe->rss_hash_result);
423         }
424         *rxhash_type = PKT_HASH_TYPE_NONE;
425         return 0;
426 }
427
428 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
429                             u16 cons, u16 prod,
430                             struct eth_fast_path_rx_cqe *cqe)
431 {
432         struct bnx2x *bp = fp->bp;
433         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
434         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
435         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
436         dma_addr_t mapping;
437         struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
438         struct sw_rx_bd *first_buf = &tpa_info->first_buf;
439
440         /* print error if current state != stop */
441         if (tpa_info->tpa_state != BNX2X_TPA_STOP)
442                 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
443
444         /* Try to map an empty data buffer from the aggregation info  */
445         mapping = dma_map_single(&bp->pdev->dev,
446                                  first_buf->data + NET_SKB_PAD,
447                                  fp->rx_buf_size, DMA_FROM_DEVICE);
448         /*
449          *  ...if it fails - move the skb from the consumer to the producer
450          *  and set the current aggregation state as ERROR to drop it
451          *  when TPA_STOP arrives.
452          */
453
454         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
455                 /* Move the BD from the consumer to the producer */
456                 bnx2x_reuse_rx_data(fp, cons, prod);
457                 tpa_info->tpa_state = BNX2X_TPA_ERROR;
458                 return;
459         }
460
461         /* move empty data from pool to prod */
462         prod_rx_buf->data = first_buf->data;
463         dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
464         /* point prod_bd to new data */
465         prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
466         prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
467
468         /* move partial skb from cons to pool (don't unmap yet) */
469         *first_buf = *cons_rx_buf;
470
471         /* mark bin state as START */
472         tpa_info->parsing_flags =
473                 le16_to_cpu(cqe->pars_flags.flags);
474         tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
475         tpa_info->tpa_state = BNX2X_TPA_START;
476         tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
477         tpa_info->placement_offset = cqe->placement_offset;
478         tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
479         if (fp->mode == TPA_MODE_GRO) {
480                 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
481                 tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
482                 tpa_info->gro_size = gro_size;
483         }
484
485 #ifdef BNX2X_STOP_ON_ERROR
486         fp->tpa_queue_used |= (1 << queue);
487         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
488            fp->tpa_queue_used);
489 #endif
490 }
491
492 /* Timestamp option length allowed for TPA aggregation:
493  *
494  *              nop nop kind length echo val
495  */
496 #define TPA_TSTAMP_OPT_LEN      12
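/* i.e. 1 (nop) + 1 (nop) + 1 (kind) + 1 (length) + 4 (TS val) + 4 (TS echo) = 12 bytes */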
497 /**
498  * bnx2x_set_gro_params - compute GRO values
499  *
500  * @skb:                packet skb
501  * @parsing_flags:      parsing flags from the START CQE
502  * @len_on_bd:          total length of the first packet for the
503  *                      aggregation.
504  * @pkt_len:            length of all segments
505  *
506  * Approximates the MSS for this aggregation using its
507  * first packet.
508  * Also computes the number of aggregated segments and the gso_type.
509  */
510 static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
511                                  u16 len_on_bd, unsigned int pkt_len,
512                                  u16 num_of_coalesced_segs)
513 {
514         /* TPA aggregation won't have either IP options or TCP options
515          * other than timestamp or IPv6 extension headers.
516          */
517         u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
518
519         if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
520             PRS_FLAG_OVERETH_IPV6) {
521                 hdrs_len += sizeof(struct ipv6hdr);
522                 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
523         } else {
524                 hdrs_len += sizeof(struct iphdr);
525                 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
526         }
527
528         /* Check if there was a TCP timestamp; if there was, it will
529          * always be 12 bytes long: nop nop kind length echo val.
530          *
531          * Otherwise FW would close the aggregation.
532          */
533         if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
534                 hdrs_len += TPA_TSTAMP_OPT_LEN;
535
536         skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
537
538         /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
539          * to skb_shinfo(skb)->gso_segs
540          */
541         NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
542 }
543
544 static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
545                               u16 index, gfp_t gfp_mask)
546 {
547         struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
548         struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
549         struct bnx2x_alloc_pool *pool = &fp->page_pool;
550         dma_addr_t mapping;
551
552         if (!pool->page) {
553                 pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
554                 if (unlikely(!pool->page))
555                         return -ENOMEM;
556
557                 pool->offset = 0;
558         }
559
560         mapping = dma_map_page(&bp->pdev->dev, pool->page,
561                                pool->offset, SGE_PAGE_SIZE, DMA_FROM_DEVICE);
562         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
563                 BNX2X_ERR("Can't map sge\n");
564                 return -ENOMEM;
565         }
566
567         sw_buf->page = pool->page;
568         sw_buf->offset = pool->offset;
569
570         dma_unmap_addr_set(sw_buf, mapping, mapping);
571
572         sge->addr_hi = cpu_to_le32(U64_HI(mapping));
573         sge->addr_lo = cpu_to_le32(U64_LO(mapping));
574
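        /* If another SGE-sized chunk still fits in this page, keep the page
         * in the pool and take an extra reference for the chunk just handed
         * to the ring; otherwise the ring owns the last reference and a fresh
         * page will be allocated on the next call.
         */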
575         pool->offset += SGE_PAGE_SIZE;
576         if (PAGE_SIZE - pool->offset >= SGE_PAGE_SIZE)
577                 get_page(pool->page);
578         else
579                 pool->page = NULL;
580         return 0;
581 }
582
583 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
584                                struct bnx2x_agg_info *tpa_info,
585                                u16 pages,
586                                struct sk_buff *skb,
587                                struct eth_end_agg_rx_cqe *cqe,
588                                u16 cqe_idx)
589 {
590         struct sw_rx_page *rx_pg, old_rx_pg;
591         u32 i, frag_len, frag_size;
592         int err, j, frag_id = 0;
593         u16 len_on_bd = tpa_info->len_on_bd;
594         u16 full_page = 0, gro_size = 0;
595
596         frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
597
598         if (fp->mode == TPA_MODE_GRO) {
599                 gro_size = tpa_info->gro_size;
600                 full_page = tpa_info->full_page;
601         }
602
603         /* This is needed in order to enable forwarding support */
604         if (frag_size)
605                 bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
606                                      le16_to_cpu(cqe->pkt_len),
607                                      le16_to_cpu(cqe->num_of_coalesced_segs));
608
609 #ifdef BNX2X_STOP_ON_ERROR
610         if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
611                 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
612                           pages, cqe_idx);
613                 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
614                 bnx2x_panic();
615                 return -EINVAL;
616         }
617 #endif
618
619         /* Run through the SGL and compose the fragmented skb */
620         for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
621                 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
622
623                 /* FW gives the indices of the SGE as if the ring is an array
624                    (meaning that "next" element will consume 2 indices) */
625                 if (fp->mode == TPA_MODE_GRO)
626                         frag_len = min_t(u32, frag_size, (u32)full_page);
627                 else /* LRO */
628                         frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);
629
630                 rx_pg = &fp->rx_page_ring[sge_idx];
631                 old_rx_pg = *rx_pg;
632
633                 /* If we fail to allocate a substitute page, we simply stop
634                    where we are and drop the whole packet */
635                 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
636                 if (unlikely(err)) {
637                         bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
638                         return err;
639                 }
640
641                 dma_unmap_page(&bp->pdev->dev,
642                                dma_unmap_addr(&old_rx_pg, mapping),
643                                SGE_PAGE_SIZE, DMA_FROM_DEVICE);
644                 /* Add one frag and update the appropriate fields in the skb */
645                 if (fp->mode == TPA_MODE_LRO)
646                         skb_fill_page_desc(skb, j, old_rx_pg.page,
647                                            old_rx_pg.offset, frag_len);
648                 else { /* GRO */
649                         int rem;
650                         int offset = 0;
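                        /* GRO mode: slice this SGE chunk into gro_size sized
                         * frags (one aggregated segment each); every frag
                         * after the first needs its own page reference.
                         */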
651                         for (rem = frag_len; rem > 0; rem -= gro_size) {
652                                 int len = rem > gro_size ? gro_size : rem;
653                                 skb_fill_page_desc(skb, frag_id++,
654                                                    old_rx_pg.page,
655                                                    old_rx_pg.offset + offset,
656                                                    len);
657                                 if (offset)
658                                         get_page(old_rx_pg.page);
659                                 offset += len;
660                         }
661                 }
662
663                 skb->data_len += frag_len;
664                 skb->truesize += SGE_PAGES;
665                 skb->len += frag_len;
666
667                 frag_size -= frag_len;
668         }
669
670         return 0;
671 }
672
673 static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
674 {
675         if (fp->rx_frag_size)
676                 skb_free_frag(data);
677         else
678                 kfree(data);
679 }
680
681 static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
682 {
683         if (fp->rx_frag_size) {
684                 /* GFP_KERNEL allocations are used only during initialization */
685                 if (unlikely(gfpflags_allow_blocking(gfp_mask)))
686                         return (void *)__get_free_page(gfp_mask);
687
688                 return netdev_alloc_frag(fp->rx_frag_size);
689         }
690
691         return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
692 }
693
694 #ifdef CONFIG_INET
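/* Helpers for FW GRO completion: refill th->check with the TCP pseudo-header
 * checksum of the aggregated frame so that tcp_gro_complete() can hand the
 * skb up as CHECKSUM_PARTIAL (IPv4 and IPv6 variants respectively).
 */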
695 static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
696 {
697         const struct iphdr *iph = ip_hdr(skb);
698         struct tcphdr *th;
699
700         skb_set_transport_header(skb, sizeof(struct iphdr));
701         th = tcp_hdr(skb);
702
703         th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
704                                   iph->saddr, iph->daddr, 0);
705 }
706
707 static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
708 {
709         struct ipv6hdr *iph = ipv6_hdr(skb);
710         struct tcphdr *th;
711
712         skb_set_transport_header(skb, sizeof(struct ipv6hdr));
713         th = tcp_hdr(skb);
714
715         th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
716                                   &iph->saddr, &iph->daddr, 0);
717 }
718
719 static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
720                             void (*gro_func)(struct bnx2x*, struct sk_buff*))
721 {
722         skb_reset_network_header(skb);
723         gro_func(bp, skb);
724         tcp_gro_complete(skb);
725 }
726 #endif
727
728 static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
729                                struct sk_buff *skb)
730 {
731 #ifdef CONFIG_INET
732         if (skb_shinfo(skb)->gso_size) {
733                 switch (be16_to_cpu(skb->protocol)) {
734                 case ETH_P_IP:
735                         bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
736                         break;
737                 case ETH_P_IPV6:
738                         bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
739                         break;
740                 default:
741                         WARN_ONCE(1, "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
742                                   be16_to_cpu(skb->protocol));
743                 }
744         }
745 #endif
746         skb_record_rx_queue(skb, fp->rx_queue);
747         napi_gro_receive(&fp->napi, skb);
748 }
749
750 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
751                            struct bnx2x_agg_info *tpa_info,
752                            u16 pages,
753                            struct eth_end_agg_rx_cqe *cqe,
754                            u16 cqe_idx)
755 {
756         struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
757         u8 pad = tpa_info->placement_offset;
758         u16 len = tpa_info->len_on_bd;
759         struct sk_buff *skb = NULL;
760         u8 *new_data, *data = rx_buf->data;
761         u8 old_tpa_state = tpa_info->tpa_state;
762
763         tpa_info->tpa_state = BNX2X_TPA_STOP;
764
765         /* If there was an error during the handling of the TPA_START -
766          * drop this aggregation.
767          */
768         if (old_tpa_state == BNX2X_TPA_ERROR)
769                 goto drop;
770
771         /* Try to allocate the new data */
772         new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
773         /* Unmap skb in the pool anyway, as we are going to change
774            pool entry status to BNX2X_TPA_STOP even if new skb allocation
775            fails. */
776         dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
777                          fp->rx_buf_size, DMA_FROM_DEVICE);
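        /* The skb below is built around the old buffer (data); new_data only
         * refills this TPA bin once the old buffer has been handed upstream.
         */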
778         if (likely(new_data))
779                 skb = build_skb(data, fp->rx_frag_size);
780
781         if (likely(skb)) {
782 #ifdef BNX2X_STOP_ON_ERROR
783                 if (pad + len > fp->rx_buf_size) {
784                         BNX2X_ERR("skb_put is about to fail...  pad %d  len %d  rx_buf_size %d\n",
785                                   pad, len, fp->rx_buf_size);
786                         bnx2x_panic();
787                         return;
788                 }
789 #endif
790
791                 skb_reserve(skb, pad + NET_SKB_PAD);
792                 skb_put(skb, len);
793                 skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);
794
795                 skb->protocol = eth_type_trans(skb, bp->dev);
796                 skb->ip_summed = CHECKSUM_UNNECESSARY;
797
798                 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
799                                          skb, cqe, cqe_idx)) {
800                         if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
801                                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
802                         bnx2x_gro_receive(bp, fp, skb);
803                 } else {
804                         DP(NETIF_MSG_RX_STATUS,
805                            "Failed to allocate new pages - dropping packet!\n");
806                         dev_kfree_skb_any(skb);
807                 }
808
809                 /* put new data in bin */
810                 rx_buf->data = new_data;
811
812                 return;
813         }
814         if (new_data)
815                 bnx2x_frag_free(fp, new_data);
816 drop:
817         /* drop the packet and keep the buffer in the bin */
818         DP(NETIF_MSG_RX_STATUS,
819            "Failed to allocate or map a new skb - dropping packet!\n");
820         bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
821 }
822
823 static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
824                                u16 index, gfp_t gfp_mask)
825 {
826         u8 *data;
827         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
828         struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
829         dma_addr_t mapping;
830
831         data = bnx2x_frag_alloc(fp, gfp_mask);
832         if (unlikely(data == NULL))
833                 return -ENOMEM;
834
835         mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
836                                  fp->rx_buf_size,
837                                  DMA_FROM_DEVICE);
838         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
839                 bnx2x_frag_free(fp, data);
840                 BNX2X_ERR("Can't map rx data\n");
841                 return -ENOMEM;
842         }
843
844         rx_buf->data = data;
845         dma_unmap_addr_set(rx_buf, mapping, mapping);
846
847         rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
848         rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
849
850         return 0;
851 }
852
853 static
854 void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
855                                  struct bnx2x_fastpath *fp,
856                                  struct bnx2x_eth_q_stats *qstats)
857 {
858         /* Do nothing if no L4 csum validation was done.
859          * We do not check whether IP csum was validated. For IPv4 we assume
860          * that if the card got as far as validating the L4 csum, it also
861          * validated the IP csum. IPv6 has no IP csum.
862          */
863         if (cqe->fast_path_cqe.status_flags &
864             ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
865                 return;
866
867         /* If L4 validation was done, check if an error was found. */
868
869         if (cqe->fast_path_cqe.type_error_flags &
870             (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
871              ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
872                 qstats->hw_csum_err++;
873         else
874                 skb->ip_summed = CHECKSUM_UNNECESSARY;
875 }
876
877 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
878 {
879         struct bnx2x *bp = fp->bp;
880         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
881         u16 sw_comp_cons, sw_comp_prod;
882         int rx_pkt = 0;
883         union eth_rx_cqe *cqe;
884         struct eth_fast_path_rx_cqe *cqe_fp;
885
886 #ifdef BNX2X_STOP_ON_ERROR
887         if (unlikely(bp->panic))
888                 return 0;
889 #endif
890         if (budget <= 0)
891                 return rx_pkt;
892
893         bd_cons = fp->rx_bd_cons;
894         bd_prod = fp->rx_bd_prod;
895         bd_prod_fw = bd_prod;
896         sw_comp_cons = fp->rx_comp_cons;
897         sw_comp_prod = fp->rx_comp_prod;
898
899         comp_ring_cons = RCQ_BD(sw_comp_cons);
900         cqe = &fp->rx_comp_ring[comp_ring_cons];
901         cqe_fp = &cqe->fast_path_cqe;
902
903         DP(NETIF_MSG_RX_STATUS,
904            "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);
905
906         while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
907                 struct sw_rx_bd *rx_buf = NULL;
908                 struct sk_buff *skb;
909                 u8 cqe_fp_flags;
910                 enum eth_rx_cqe_type cqe_fp_type;
911                 u16 len, pad, queue;
912                 u8 *data;
913                 u32 rxhash;
914                 enum pkt_hash_types rxhash_type;
915
916 #ifdef BNX2X_STOP_ON_ERROR
917                 if (unlikely(bp->panic))
918                         return 0;
919 #endif
920
921                 bd_prod = RX_BD(bd_prod);
922                 bd_cons = RX_BD(bd_cons);
923
924                 /* An rmb() is required to ensure that the CQE is not read
925                  * before it is written by the adapter DMA.  PCI ordering
926                  * rules will make sure the other fields are written before
927                  * the marker at the end of struct eth_fast_path_rx_cqe
928                  * but without rmb() a weakly ordered processor can process
929                  * stale data.  Without the barrier the TPA state-machine
930                  * might enter an inconsistent state and the kernel stack
931                  * might be provided with an incorrect packet description -
932                  * these lead to various kernel crashes.
933                  */
934                 rmb();
935
936                 cqe_fp_flags = cqe_fp->type_error_flags;
937                 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
938
939                 DP(NETIF_MSG_RX_STATUS,
940                    "CQE type %x  err %x  status %x  queue %x  vlan %x  len %u\n",
941                    CQE_TYPE(cqe_fp_flags),
942                    cqe_fp_flags, cqe_fp->status_flags,
943                    le32_to_cpu(cqe_fp->rss_hash_result),
944                    le16_to_cpu(cqe_fp->vlan_tag),
945                    le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
946
947                 /* is this a slowpath msg? */
948                 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
949                         bnx2x_sp_event(fp, cqe);
950                         goto next_cqe;
951                 }
952
953                 rx_buf = &fp->rx_buf_ring[bd_cons];
954                 data = rx_buf->data;
955
956                 if (!CQE_TYPE_FAST(cqe_fp_type)) {
957                         struct bnx2x_agg_info *tpa_info;
958                         u16 frag_size, pages;
959 #ifdef BNX2X_STOP_ON_ERROR
960                         /* sanity check */
961                         if (fp->mode == TPA_MODE_DISABLED &&
962                             (CQE_TYPE_START(cqe_fp_type) ||
963                              CQE_TYPE_STOP(cqe_fp_type)))
964                                 BNX2X_ERR("START/STOP packet while TPA disabled, type %x\n",
965                                           CQE_TYPE(cqe_fp_type));
966 #endif
967
968                         if (CQE_TYPE_START(cqe_fp_type)) {
969                                 u16 queue = cqe_fp->queue_index;
970                                 DP(NETIF_MSG_RX_STATUS,
971                                    "calling tpa_start on queue %d\n",
972                                    queue);
973
974                                 bnx2x_tpa_start(fp, queue,
975                                                 bd_cons, bd_prod,
976                                                 cqe_fp);
977
978                                 goto next_rx;
979                         }
980                         queue = cqe->end_agg_cqe.queue_index;
981                         tpa_info = &fp->tpa_info[queue];
982                         DP(NETIF_MSG_RX_STATUS,
983                            "calling tpa_stop on queue %d\n",
984                            queue);
985
986                         frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
987                                     tpa_info->len_on_bd;
988
989                         if (fp->mode == TPA_MODE_GRO)
990                                 pages = (frag_size + tpa_info->full_page - 1) /
991                                          tpa_info->full_page;
992                         else
993                                 pages = SGE_PAGE_ALIGN(frag_size) >>
994                                         SGE_PAGE_SHIFT;
995
996                         bnx2x_tpa_stop(bp, fp, tpa_info, pages,
997                                        &cqe->end_agg_cqe, comp_ring_cons);
998 #ifdef BNX2X_STOP_ON_ERROR
999                         if (bp->panic)
1000                                 return 0;
1001 #endif
1002
1003                         bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
1004                         goto next_cqe;
1005                 }
1006                 /* non TPA */
1007                 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
1008                 pad = cqe_fp->placement_offset;
1009                 dma_sync_single_for_cpu(&bp->pdev->dev,
1010                                         dma_unmap_addr(rx_buf, mapping),
1011                                         pad + RX_COPY_THRESH,
1012                                         DMA_FROM_DEVICE);
1013                 pad += NET_SKB_PAD;
1014                 prefetch(data + pad); /* speed up eth_type_trans() */
1015                 /* is this an error packet? */
1016                 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1017                         DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1018                            "ERROR  flags %x  rx packet %u\n",
1019                            cqe_fp_flags, sw_comp_cons);
1020                         bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
1021                         goto reuse_rx;
1022                 }
1023
1024                 /* Since we don't have a jumbo ring,
1025                  * copy small packets if mtu > 1500
1026                  */
1027                 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1028                     (len <= RX_COPY_THRESH)) {
1029                         skb = napi_alloc_skb(&fp->napi, len);
1030                         if (skb == NULL) {
1031                                 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1032                                    "ERROR  packet dropped because of alloc failure\n");
1033                                 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1034                                 goto reuse_rx;
1035                         }
1036                         memcpy(skb->data, data + pad, len);
1037                         bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1038                 } else {
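                        /* For larger frames, try to allocate a replacement
                         * buffer first; only then unmap the current one and
                         * build the skb around it.  On allocation failure the
                         * current buffer is simply recycled.
                         */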
1039                         if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
1040                                                        GFP_ATOMIC) == 0)) {
1041                                 dma_unmap_single(&bp->pdev->dev,
1042                                                  dma_unmap_addr(rx_buf, mapping),
1043                                                  fp->rx_buf_size,
1044                                                  DMA_FROM_DEVICE);
1045                                 skb = build_skb(data, fp->rx_frag_size);
1046                                 if (unlikely(!skb)) {
1047                                         bnx2x_frag_free(fp, data);
1048                                         bnx2x_fp_qstats(bp, fp)->
1049                                                         rx_skb_alloc_failed++;
1050                                         goto next_rx;
1051                                 }
1052                                 skb_reserve(skb, pad);
1053                         } else {
1054                                 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1055                                    "ERROR  packet dropped because of alloc failure\n");
1056                                 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1057 reuse_rx:
1058                                 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1059                                 goto next_rx;
1060                         }
1061                 }
1062
1063                 skb_put(skb, len);
1064                 skb->protocol = eth_type_trans(skb, bp->dev);
1065
1066                 /* Set Toeplitz hash for a non-LRO skb */
1067                 rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
1068                 skb_set_hash(skb, rxhash, rxhash_type);
1069
1070                 skb_checksum_none_assert(skb);
1071
1072                 if (bp->dev->features & NETIF_F_RXCSUM)
1073                         bnx2x_csum_validate(skb, cqe, fp,
1074                                             bnx2x_fp_qstats(bp, fp));
1075
1076                 skb_record_rx_queue(skb, fp->rx_queue);
1077
1078                 /* Check if this packet was timestamped */
1079                 if (unlikely(cqe->fast_path_cqe.type_error_flags &
1080                              (1 << ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT)))
1081                         bnx2x_set_rx_ts(bp, skb);
1082
1083                 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
1084                     PARSING_FLAGS_VLAN)
1085                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1086                                                le16_to_cpu(cqe_fp->vlan_tag));
1087
1088                 napi_gro_receive(&fp->napi, skb);
1089 next_rx:
1090                 rx_buf->data = NULL;
1091
1092                 bd_cons = NEXT_RX_IDX(bd_cons);
1093                 bd_prod = NEXT_RX_IDX(bd_prod);
1094                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1095                 rx_pkt++;
1096 next_cqe:
1097                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1098                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1099
1100                 /* mark CQE as free */
1101                 BNX2X_SEED_CQE(cqe_fp);
1102
1103                 if (rx_pkt == budget)
1104                         break;
1105
1106                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1107                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1108                 cqe_fp = &cqe->fast_path_cqe;
1109         } /* while */
1110
1111         fp->rx_bd_cons = bd_cons;
1112         fp->rx_bd_prod = bd_prod_fw;
1113         fp->rx_comp_cons = sw_comp_cons;
1114         fp->rx_comp_prod = sw_comp_prod;
1115
1116         /* Update producers */
1117         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1118                              fp->rx_sge_prod);
1119
1120         return rx_pkt;
1121 }
1122
1123 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1124 {
1125         struct bnx2x_fastpath *fp = fp_cookie;
1126         struct bnx2x *bp = fp->bp;
1127         u8 cos;
1128
1129         DP(NETIF_MSG_INTR,
1130            "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
1131            fp->index, fp->fw_sb_id, fp->igu_sb_id);
1132
1133         bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1134
1135 #ifdef BNX2X_STOP_ON_ERROR
1136         if (unlikely(bp->panic))
1137                 return IRQ_HANDLED;
1138 #endif
1139
1140         /* Handle Rx and Tx according to MSI-X vector */
1141         for_each_cos_in_tx_queue(fp, cos)
1142                 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1143
1144         prefetch(&fp->sb_running_index[SM_RX_ID]);
1145         napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));
1146
1147         return IRQ_HANDLED;
1148 }
1149
1150 /* HW Lock for shared dual port PHYs */
1151 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1152 {
1153         mutex_lock(&bp->port.phy_mutex);
1154
1155         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1156 }
1157
1158 void bnx2x_release_phy_lock(struct bnx2x *bp)
1159 {
1160         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1161
1162         mutex_unlock(&bp->port.phy_mutex);
1163 }
1164
1165 /* calculates MF speed according to current linespeed and MF configuration */
1166 u16 bnx2x_get_mf_speed(struct bnx2x *bp)
1167 {
1168         u16 line_speed = bp->link_vars.line_speed;
1169         if (IS_MF(bp)) {
1170                 u16 maxCfg = bnx2x_extract_max_cfg(bp,
1171                                                    bp->mf_config[BP_VN(bp)]);
1172
1173                 /* Calculate the current MAX line speed limit for the MF
1174                  * devices
1175                  */
1176                 if (IS_MF_PERCENT_BW(bp))
1177                         line_speed = (line_speed * maxCfg) / 100;
1178                 else { /* SD mode */
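                        /* Here maxCfg is in 100 Mbps units, so scale it to
                         * Mbps before comparing with the physical line speed.
                         */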
1179                         u16 vn_max_rate = maxCfg * 100;
1180
1181                         if (vn_max_rate < line_speed)
1182                                 line_speed = vn_max_rate;
1183                 }
1184         }
1185
1186         return line_speed;
1187 }
1188
1189 /**
1190  * bnx2x_fill_report_data - fill the link report data to be reported
1191  *
1192  * @bp:         driver handle
1193  * @data:       link state to update
1194  *
1195  * It uses non-atomic bit operations because it is called under the mutex.
1196  */
1197 static void bnx2x_fill_report_data(struct bnx2x *bp,
1198                                    struct bnx2x_link_report_data *data)
1199 {
1200         memset(data, 0, sizeof(*data));
1201
1202         if (IS_PF(bp)) {
1203                 /* Fill the report data: effective line speed */
1204                 data->line_speed = bnx2x_get_mf_speed(bp);
1205
1206                 /* Link is down */
1207                 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1208                         __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1209                                   &data->link_report_flags);
1210
1211                 if (!BNX2X_NUM_ETH_QUEUES(bp))
1212                         __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1213                                   &data->link_report_flags);
1214
1215                 /* Full DUPLEX */
1216                 if (bp->link_vars.duplex == DUPLEX_FULL)
1217                         __set_bit(BNX2X_LINK_REPORT_FD,
1218                                   &data->link_report_flags);
1219
1220                 /* Rx Flow Control is ON */
1221                 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1222                         __set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1223                                   &data->link_report_flags);
1224
1225                 /* Tx Flow Control is ON */
1226                 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1227                         __set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1228                                   &data->link_report_flags);
1229         } else { /* VF */
1230                 *data = bp->vf_link_vars;
1231         }
1232 }
1233
1234 /**
1235  * bnx2x_link_report - report link status to OS.
1236  *
1237  * @bp:         driver handle
1238  *
1239  * Calls the __bnx2x_link_report() under the same locking scheme
1240  * as the link/PHY state managing code to ensure consistent link
1241  * reporting.
1242  */
1243
1244 void bnx2x_link_report(struct bnx2x *bp)
1245 {
1246         bnx2x_acquire_phy_lock(bp);
1247         __bnx2x_link_report(bp);
1248         bnx2x_release_phy_lock(bp);
1249 }
1250
1251 /**
1252  * __bnx2x_link_report - report link status to OS.
1253  *
1254  * @bp:         driver handle
1255  *
1256  * Non-atomic implementation.
1257  * Should be called under the phy_lock.
1258  */
1259 void __bnx2x_link_report(struct bnx2x *bp)
1260 {
1261         struct bnx2x_link_report_data cur_data;
1262
1263         /* reread mf_cfg */
1264         if (IS_PF(bp) && !CHIP_IS_E1(bp))
1265                 bnx2x_read_mf_cfg(bp);
1266
1267         /* Read the current link report info */
1268         bnx2x_fill_report_data(bp, &cur_data);
1269
1270         /* Don't report link down or exactly the same link status twice */
1271         if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1272             (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1273                       &bp->last_reported_link.link_report_flags) &&
1274              test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1275                       &cur_data.link_report_flags)))
1276                 return;
1277
1278         bp->link_cnt++;
1279
1280         /* We are going to report new link parameters now -
1281          * remember the current data for the next time.
1282          */
1283         memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1284
1285         /* propagate status to VFs */
1286         if (IS_PF(bp))
1287                 bnx2x_iov_link_update(bp);
1288
1289         if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1290                      &cur_data.link_report_flags)) {
1291                 netif_carrier_off(bp->dev);
1292                 netdev_err(bp->dev, "NIC Link is Down\n");
1293                 return;
1294         } else {
1295                 const char *duplex;
1296                 const char *flow;
1297
1298                 netif_carrier_on(bp->dev);
1299
1300                 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1301                                        &cur_data.link_report_flags))
1302                         duplex = "full";
1303                 else
1304                         duplex = "half";
1305
1306                 /* Handle the FC at the end so that only these flags could
1307                  * possibly be set. This way we may easily check if no FC
1308                  * is enabled.
1309                  */
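                /* Only the RX/TX flow-control bits can still be set here
                 * (link-down was handled above and the duplex bit was just
                 * cleared), so a non-zero flags word means at least one FC
                 * direction is on.
                 */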
1310                 if (cur_data.link_report_flags) {
1311                         if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1312                                      &cur_data.link_report_flags)) {
1313                                 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1314                                      &cur_data.link_report_flags))
1315                                         flow = "ON - receive & transmit";
1316                                 else
1317                                         flow = "ON - receive";
1318                         } else {
1319                                 flow = "ON - transmit";
1320                         }
1321                 } else {
1322                         flow = "none";
1323                 }
1324                 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1325                             cur_data.line_speed, duplex, flow);
1326         }
1327 }
1328
1329 static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1330 {
1331         int i;
1332
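        /* Chain the SGE ring pages: the "next page" element near the end of
         * each page points at the DMA address of the following page, with the
         * last page wrapping back to the first.
         */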
1333         for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1334                 struct eth_rx_sge *sge;
1335
1336                 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1337                 sge->addr_hi =
1338                         cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1339                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1340
1341                 sge->addr_lo =
1342                         cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1343                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1344         }
1345 }
1346
1347 static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1348                                 struct bnx2x_fastpath *fp, int last)
1349 {
1350         int i;
1351
1352         for (i = 0; i < last; i++) {
1353                 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1354                 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1355                 u8 *data = first_buf->data;
1356
1357                 if (data == NULL) {
1358                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1359                         continue;
1360                 }
1361                 if (tpa_info->tpa_state == BNX2X_TPA_START)
1362                         dma_unmap_single(&bp->pdev->dev,
1363                                          dma_unmap_addr(first_buf, mapping),
1364                                          fp->rx_buf_size, DMA_FROM_DEVICE);
1365                 bnx2x_frag_free(fp, data);
1366                 first_buf->data = NULL;
1367         }
1368 }
1369
1370 void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1371 {
1372         int j;
1373
1374         for_each_rx_queue_cnic(bp, j) {
1375                 struct bnx2x_fastpath *fp = &bp->fp[j];
1376
1377                 fp->rx_bd_cons = 0;
1378
1379                 /* Activate BD ring */
1380                 /* Warning!
1381                  * This will generate an interrupt (to the TSTORM);
1382                  * it must only be done after the chip is initialized.
1383                  */
1384                 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1385                                      fp->rx_sge_prod);
1386         }
1387 }
1388
1389 void bnx2x_init_rx_rings(struct bnx2x *bp)
1390 {
1391         int func = BP_FUNC(bp);
1392         u16 ring_prod;
1393         int i, j;
1394
1395         /* Allocate TPA resources */
1396         for_each_eth_queue(bp, j) {
1397                 struct bnx2x_fastpath *fp = &bp->fp[j];
1398
1399                 DP(NETIF_MSG_IFUP,
1400                    "mtu %d  rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1401
1402                 if (fp->mode != TPA_MODE_DISABLED) {
1403                         /* Fill the per-aggregation pool */
1404                         for (i = 0; i < MAX_AGG_QS(bp); i++) {
1405                                 struct bnx2x_agg_info *tpa_info =
1406                                         &fp->tpa_info[i];
1407                                 struct sw_rx_bd *first_buf =
1408                                         &tpa_info->first_buf;
1409
1410                                 first_buf->data =
1411                                         bnx2x_frag_alloc(fp, GFP_KERNEL);
1412                                 if (!first_buf->data) {
1413                                         BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1414                                                   j);
1415                                         bnx2x_free_tpa_pool(bp, fp, i);
1416                                         fp->mode = TPA_MODE_DISABLED;
1417                                         break;
1418                                 }
1419                                 dma_unmap_addr_set(first_buf, mapping, 0);
1420                                 tpa_info->tpa_state = BNX2X_TPA_STOP;
1421                         }
1422
1423                         /* "next page" elements initialization */
1424                         bnx2x_set_next_page_sgl(fp);
1425
1426                         /* set SGEs bit mask */
1427                         bnx2x_init_sge_ring_bit_mask(fp);
1428
1429                         /* Allocate SGEs and initialize the ring elements */
1430                         for (i = 0, ring_prod = 0;
1431                              i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1432
1433                                 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
1434                                                        GFP_KERNEL) < 0) {
1435                                         BNX2X_ERR("was only able to allocate %d rx sges\n",
1436                                                   i);
1437                                         BNX2X_ERR("disabling TPA for queue[%d]\n",
1438                                                   j);
1439                                         /* Cleanup already allocated elements */
1440                                         bnx2x_free_rx_sge_range(bp, fp,
1441                                                                 ring_prod);
1442                                         bnx2x_free_tpa_pool(bp, fp,
1443                                                             MAX_AGG_QS(bp));
1444                                         fp->mode = TPA_MODE_DISABLED;
1445                                         ring_prod = 0;
1446                                         break;
1447                                 }
1448                                 ring_prod = NEXT_SGE_IDX(ring_prod);
1449                         }
1450
1451                         fp->rx_sge_prod = ring_prod;
1452                 }
1453         }
1454
1455         for_each_eth_queue(bp, j) {
1456                 struct bnx2x_fastpath *fp = &bp->fp[j];
1457
1458                 fp->rx_bd_cons = 0;
1459
1460                 /* Activate BD ring */
1461                 /* Warning!
1462                  * This will generate an interrupt (to the TSTORM);
1463                  * it must only be done after the chip is initialized.
1464                  */
1465                 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1466                                      fp->rx_sge_prod);
1467
1468                 if (j != 0)
1469                         continue;
1470
1471                 if (CHIP_IS_E1(bp)) {
1472                         REG_WR(bp, BAR_USTRORM_INTMEM +
1473                                USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1474                                U64_LO(fp->rx_comp_mapping));
1475                         REG_WR(bp, BAR_USTRORM_INTMEM +
1476                                USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1477                                U64_HI(fp->rx_comp_mapping));
1478                 }
1479         }
1480 }
1481
1482 static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1483 {
1484         u8 cos;
1485         struct bnx2x *bp = fp->bp;
1486
1487         for_each_cos_in_tx_queue(fp, cos) {
1488                 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1489                 unsigned pkts_compl = 0, bytes_compl = 0;
1490
1491                 u16 sw_prod = txdata->tx_pkt_prod;
1492                 u16 sw_cons = txdata->tx_pkt_cons;
1493
1494                 while (sw_cons != sw_prod) {
1495                         bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1496                                           &pkts_compl, &bytes_compl);
1497                         sw_cons++;
1498                 }
1499
1500                 netdev_tx_reset_queue(
1501                         netdev_get_tx_queue(bp->dev,
1502                                             txdata->txq_index));
1503         }
1504 }
1505
1506 static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1507 {
1508         int i;
1509
1510         for_each_tx_queue_cnic(bp, i) {
1511                 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1512         }
1513 }
1514
1515 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1516 {
1517         int i;
1518
1519         for_each_eth_queue(bp, i) {
1520                 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1521         }
1522 }
1523
1524 static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1525 {
1526         struct bnx2x *bp = fp->bp;
1527         int i;
1528
1529         /* ring wasn't allocated */
1530         if (fp->rx_buf_ring == NULL)
1531                 return;
1532
1533         for (i = 0; i < NUM_RX_BD; i++) {
1534                 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1535                 u8 *data = rx_buf->data;
1536
1537                 if (data == NULL)
1538                         continue;
1539                 dma_unmap_single(&bp->pdev->dev,
1540                                  dma_unmap_addr(rx_buf, mapping),
1541                                  fp->rx_buf_size, DMA_FROM_DEVICE);
1542
1543                 rx_buf->data = NULL;
1544                 bnx2x_frag_free(fp, data);
1545         }
1546 }
1547
1548 static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1549 {
1550         int j;
1551
1552         for_each_rx_queue_cnic(bp, j) {
1553                 bnx2x_free_rx_bds(&bp->fp[j]);
1554         }
1555 }
1556
1557 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1558 {
1559         int j;
1560
1561         for_each_eth_queue(bp, j) {
1562                 struct bnx2x_fastpath *fp = &bp->fp[j];
1563
1564                 bnx2x_free_rx_bds(fp);
1565
1566                 if (fp->mode != TPA_MODE_DISABLED)
1567                         bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1568         }
1569 }
1570
1571 static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1572 {
1573         bnx2x_free_tx_skbs_cnic(bp);
1574         bnx2x_free_rx_skbs_cnic(bp);
1575 }
1576
1577 void bnx2x_free_skbs(struct bnx2x *bp)
1578 {
1579         bnx2x_free_tx_skbs(bp);
1580         bnx2x_free_rx_skbs(bp);
1581 }
1582
1583 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1584 {
1585         /* load old values */
1586         u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1587
1588         if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1589                 /* leave all but MAX value */
1590                 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1591
1592                 /* set new MAX value */
1593                 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1594                                 & FUNC_MF_CFG_MAX_BW_MASK;
1595
1596                 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1597         }
1598 }
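
/*
 * Purely illustrative sketch, not part of the driver build: the update in
 * bnx2x_update_max_mf_config() above is a standard read-modify-write of a
 * bit field - clear the MAX_BW field with its mask, then OR in the new
 * value shifted into place.  The helper name and parameters below are
 * hypothetical.
 */
static inline u32 bnx2x_set_masked_field_sketch(u32 reg, u32 value,
						u32 mask, u32 shift)
{
	reg &= ~mask;			/* drop the old field contents */
	reg |= (value << shift) & mask;	/* insert the new value */
	return reg;
}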
1599
1600 /**
1601  * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1602  *
1603  * @bp:         driver handle
1604  * @nvecs:      number of vectors to be released
1605  */
1606 static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1607 {
1608         int i, offset = 0;
1609
1610         if (nvecs == offset)
1611                 return;
1612
1613         /* VFs don't have a default SB */
1614         if (IS_PF(bp)) {
1615                 free_irq(bp->msix_table[offset].vector, bp->dev);
1616                 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1617                    bp->msix_table[offset].vector);
1618                 offset++;
1619         }
1620
1621         if (CNIC_SUPPORT(bp)) {
1622                 if (nvecs == offset)
1623                         return;
1624                 offset++;
1625         }
1626
1627         for_each_eth_queue(bp, i) {
1628                 if (nvecs == offset)
1629                         return;
1630                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1631                    i, bp->msix_table[offset].vector);
1632
1633                 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1634         }
1635 }
1636
1637 void bnx2x_free_irq(struct bnx2x *bp)
1638 {
1639         if (bp->flags & USING_MSIX_FLAG &&
1640             !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1641                 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1642
1643                 /* vfs don't have a default status block */
1644                 if (IS_PF(bp))
1645                         nvecs++;
1646
1647                 bnx2x_free_msix_irqs(bp, nvecs);
1648         } else {
1649                 free_irq(bp->dev->irq, bp->dev);
1650         }
1651 }
1652
1653 int bnx2x_enable_msix(struct bnx2x *bp)
1654 {
1655         int msix_vec = 0, i, rc;
1656
1657         /* VFs don't have a default status block */
1658         if (IS_PF(bp)) {
1659                 bp->msix_table[msix_vec].entry = msix_vec;
1660                 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1661                                bp->msix_table[0].entry);
1662                 msix_vec++;
1663         }
1664
1665         /* CNIC requires an MSI-X vector of its own */
1666         if (CNIC_SUPPORT(bp)) {
1667                 bp->msix_table[msix_vec].entry = msix_vec;
1668                 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1669                                msix_vec, bp->msix_table[msix_vec].entry);
1670                 msix_vec++;
1671         }
1672
1673         /* We need separate vectors for ETH queues only (not FCoE) */
1674         for_each_eth_queue(bp, i) {
1675                 bp->msix_table[msix_vec].entry = msix_vec;
1676                 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1677                                msix_vec, msix_vec, i);
1678                 msix_vec++;
1679         }
1680
1681         DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1682            msix_vec);
1683
1684         rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
1685                                    BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
1686         /*
1687          * reconfigure number of tx/rx queues according to available
1688          * MSI-X vectors
1689          */
1690         if (rc == -ENOSPC) {
1691                 /* Get by with single vector */
1692                 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
1693                 if (rc < 0) {
1694                         BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1695                                        rc);
1696                         goto no_msix;
1697                 }
1698
1699                 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1700                 bp->flags |= USING_SINGLE_MSIX_FLAG;
1701
1702                 BNX2X_DEV_INFO("set number of queues to 1\n");
1703                 bp->num_ethernet_queues = 1;
1704                 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1705         } else if (rc < 0) {
1706                 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1707                 goto no_msix;
1708         } else if (rc < msix_vec) {
1709                 /* how less vectors we will have? */
1710                 /* how many fewer vectors do we have? */
1711
1712                 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1713
1714                 /*
1715                  * decrease number of queues by number of unallocated entries
1716                  */
1717                 bp->num_ethernet_queues -= diff;
1718                 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1719
1720                 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1721                                bp->num_queues);
1722         }
1723
1724         bp->flags |= USING_MSIX_FLAG;
1725
1726         return 0;
1727
1728 no_msix:
1729         /* fall back to INTx if there is not enough memory */
1730         if (rc == -ENOMEM)
1731                 bp->flags |= DISABLE_MSI_FLAG;
1732
1733         return rc;
1734 }
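
/*
 * Purely illustrative sketch, not part of the driver build: the MSI-X
 * budget requested by bnx2x_enable_msix() above is one slowpath vector on
 * a PF, plus one vector for CNIC when supported, plus one per ETH queue.
 * On -ENOSPC the driver retries with a single vector and drops to a
 * single ETH queue.  The helper name below is hypothetical.
 */
static inline int bnx2x_msix_budget_sketch(bool is_pf, bool cnic_support,
					   int num_eth_queues)
{
	int vecs = 0;

	if (is_pf)
		vecs++;			/* default/slowpath status block */
	if (cnic_support)
		vecs++;			/* CNIC gets its own vector */
	vecs += num_eth_queues;		/* one vector per ETH fastpath */

	return vecs;
}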
1735
1736 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1737 {
1738         int i, rc, offset = 0;
1739
1740         /* no default status block for vf */
1741         if (IS_PF(bp)) {
1742                 rc = request_irq(bp->msix_table[offset++].vector,
1743                                  bnx2x_msix_sp_int, 0,
1744                                  bp->dev->name, bp->dev);
1745                 if (rc) {
1746                         BNX2X_ERR("request sp irq failed\n");
1747                         return -EBUSY;
1748                 }
1749         }
1750
1751         if (CNIC_SUPPORT(bp))
1752                 offset++;
1753
1754         for_each_eth_queue(bp, i) {
1755                 struct bnx2x_fastpath *fp = &bp->fp[i];
1756                 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1757                          bp->dev->name, i);
1758
1759                 rc = request_irq(bp->msix_table[offset].vector,
1760                                  bnx2x_msix_fp_int, 0, fp->name, fp);
1761                 if (rc) {
1762                         BNX2X_ERR("request fp #%d irq (%d) failed  rc %d\n", i,
1763                               bp->msix_table[offset].vector, rc);
1764                         bnx2x_free_msix_irqs(bp, offset);
1765                         return -EBUSY;
1766                 }
1767
1768                 offset++;
1769         }
1770
1771         i = BNX2X_NUM_ETH_QUEUES(bp);
1772         if (IS_PF(bp)) {
1773                 offset = 1 + CNIC_SUPPORT(bp);
1774                 netdev_info(bp->dev,
1775                             "using MSI-X  IRQs: sp %d  fp[%d] %d ... fp[%d] %d\n",
1776                             bp->msix_table[0].vector,
1777                             0, bp->msix_table[offset].vector,
1778                             i - 1, bp->msix_table[offset + i - 1].vector);
1779         } else {
1780                 offset = CNIC_SUPPORT(bp);
1781                 netdev_info(bp->dev,
1782                             "using MSI-X  IRQs: fp[%d] %d ... fp[%d] %d\n",
1783                             0, bp->msix_table[offset].vector,
1784                             i - 1, bp->msix_table[offset + i - 1].vector);
1785         }
1786         return 0;
1787 }
1788
1789 int bnx2x_enable_msi(struct bnx2x *bp)
1790 {
1791         int rc;
1792
1793         rc = pci_enable_msi(bp->pdev);
1794         if (rc) {
1795                 BNX2X_DEV_INFO("MSI is not attainable\n");
1796                 return -1;
1797         }
1798         bp->flags |= USING_MSI_FLAG;
1799
1800         return 0;
1801 }
1802
1803 static int bnx2x_req_irq(struct bnx2x *bp)
1804 {
1805         unsigned long flags;
1806         unsigned int irq;
1807
1808         if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1809                 flags = 0;
1810         else
1811                 flags = IRQF_SHARED;
1812
1813         if (bp->flags & USING_MSIX_FLAG)
1814                 irq = bp->msix_table[0].vector;
1815         else
1816                 irq = bp->pdev->irq;
1817
1818         return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1819 }
1820
1821 static int bnx2x_setup_irqs(struct bnx2x *bp)
1822 {
1823         int rc = 0;
1824         if (bp->flags & USING_MSIX_FLAG &&
1825             !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1826                 rc = bnx2x_req_msix_irqs(bp);
1827                 if (rc)
1828                         return rc;
1829         } else {
1830                 rc = bnx2x_req_irq(bp);
1831                 if (rc) {
1832                         BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
1833                         return rc;
1834                 }
1835                 if (bp->flags & USING_MSI_FLAG) {
1836                         bp->dev->irq = bp->pdev->irq;
1837                         netdev_info(bp->dev, "using MSI IRQ %d\n",
1838                                     bp->dev->irq);
1839                 }
1840                 if (bp->flags & USING_MSIX_FLAG) {
1841                         bp->dev->irq = bp->msix_table[0].vector;
1842                         netdev_info(bp->dev, "using MSIX IRQ %d\n",
1843                                     bp->dev->irq);
1844                 }
1845         }
1846
1847         return 0;
1848 }
1849
1850 static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1851 {
1852         int i;
1853
1854         for_each_rx_queue_cnic(bp, i) {
1855                 napi_enable(&bnx2x_fp(bp, i, napi));
1856         }
1857 }
1858
1859 static void bnx2x_napi_enable(struct bnx2x *bp)
1860 {
1861         int i;
1862
1863         for_each_eth_queue(bp, i) {
1864                 napi_enable(&bnx2x_fp(bp, i, napi));
1865         }
1866 }
1867
1868 static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1869 {
1870         int i;
1871
1872         for_each_rx_queue_cnic(bp, i) {
1873                 napi_disable(&bnx2x_fp(bp, i, napi));
1874         }
1875 }
1876
1877 static void bnx2x_napi_disable(struct bnx2x *bp)
1878 {
1879         int i;
1880
1881         for_each_eth_queue(bp, i) {
1882                 napi_disable(&bnx2x_fp(bp, i, napi));
1883         }
1884 }
1885
1886 void bnx2x_netif_start(struct bnx2x *bp)
1887 {
1888         if (netif_running(bp->dev)) {
1889                 bnx2x_napi_enable(bp);
1890                 if (CNIC_LOADED(bp))
1891                         bnx2x_napi_enable_cnic(bp);
1892                 bnx2x_int_enable(bp);
1893                 if (bp->state == BNX2X_STATE_OPEN)
1894                         netif_tx_wake_all_queues(bp->dev);
1895         }
1896 }
1897
1898 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1899 {
1900         bnx2x_int_disable_sync(bp, disable_hw);
1901         bnx2x_napi_disable(bp);
1902         if (CNIC_LOADED(bp))
1903                 bnx2x_napi_disable_cnic(bp);
1904 }
1905
1906 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
1907                        void *accel_priv, select_queue_fallback_t fallback)
1908 {
1909         struct bnx2x *bp = netdev_priv(dev);
1910
1911         if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
1912                 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1913                 u16 ether_type = ntohs(hdr->h_proto);
1914
1915                 /* Skip VLAN tag if present */
1916                 if (ether_type == ETH_P_8021Q) {
1917                         struct vlan_ethhdr *vhdr =
1918                                 (struct vlan_ethhdr *)skb->data;
1919
1920                         ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1921                 }
1922
1923                 /* If ethertype is FCoE or FIP - use FCoE ring */
1924                 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1925                         return bnx2x_fcoe_tx(bp, txq_index);
1926         }
1927
1928         /* select a non-FCoE queue */
1929         return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
1930 }
1931
1932 void bnx2x_set_num_queues(struct bnx2x *bp)
1933 {
1934         /* RSS queues */
1935         bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
1936
1937         /* override in STORAGE SD modes */
1938         if (IS_MF_STORAGE_ONLY(bp))
1939                 bp->num_ethernet_queues = 1;
1940
1941         /* Add special queues */
1942         bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1943         bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1944
1945         BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
1946 }
1947
1948 /**
1949  * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1950  *
1951  * @bp:         Driver handle
1952  *
1953  * We currently support at most 16 Tx queues for each CoS, thus we will
1954  * allocate a multiple of 16 for ETH L2 rings according to the value of the
1955  * bp->max_cos.
1956  *
1957  * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1958  * index after all ETH L2 indices.
1959  *
1960  * If the actual number of Tx queues (for each CoS) is less than 16, there
1961  * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1962  * 16..31, ...), i.e. indices that are not coupled with any real Tx queue.
1963  *
1964  * The proper configuration of skb->queue_mapping is handled by
1965  * bnx2x_select_queue() and __skb_tx_hash().
1966  *
1967  * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1968  * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1969  */
1970 static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
1971 {
1972         int rc, tx, rx;
1973
1974         tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
1975         rx = BNX2X_NUM_ETH_QUEUES(bp);
1976
1977         /* account for fcoe queue */
1978         if (include_cnic && !NO_FCOE(bp)) {
1979                 rx++;
1980                 tx++;
1981         }
1982
1983         rc = netif_set_real_num_tx_queues(bp->dev, tx);
1984         if (rc) {
1985                 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1986                 return rc;
1987         }
1988         rc = netif_set_real_num_rx_queues(bp->dev, rx);
1989         if (rc) {
1990                 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1991                 return rc;
1992         }
1993
1994         DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
1995                           tx, rx);
1996
1997         return rc;
1998 }
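
/*
 * Purely illustrative sketch, not part of the driver build: the per-CoS
 * Tx ring mapping used by this driver (see the txdata_ptr setup in
 * bnx2x_bz_fp() later in this file) places the rings of CoS c at indices
 * c * num_eth_queues + q, and the FCoE L2 ring (when present) takes the
 * next index after all ETH indices.  The helper name is hypothetical.
 */
static inline int bnx2x_txq_index_sketch(int cos, int eth_queue,
					 int num_eth_queues)
{
	/* e.g. with 4 ETH queues: CoS 0 -> 0..3, CoS 1 -> 4..7, ... */
	return cos * num_eth_queues + eth_queue;
}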
1999
2000 static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
2001 {
2002         int i;
2003
2004         for_each_queue(bp, i) {
2005                 struct bnx2x_fastpath *fp = &bp->fp[i];
2006                 u32 mtu;
2007
2008                 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
2009                 if (IS_FCOE_IDX(i))
2010                         /*
2011                          * Although no IP frames are expected to arrive on
2012                          * this ring, we still want to add an
2013                          * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
2014                          * overrun attack.
2015                          */
2016                         mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
2017                 else
2018                         mtu = bp->dev->mtu;
2019                 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
2020                                   IP_HEADER_ALIGNMENT_PADDING +
2021                                   ETH_OVERHEAD +
2022                                   mtu +
2023                                   BNX2X_FW_RX_ALIGN_END;
2024                 fp->rx_buf_size = SKB_DATA_ALIGN(fp->rx_buf_size);
2025                 /* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
2026                 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
2027                         fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
2028                 else
2029                         fp->rx_frag_size = 0;
2030         }
2031 }
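
/*
 * Purely illustrative sketch, not part of the driver build: rx_buf_size
 * above is head-room alignment + IP header alignment padding + Ethernet
 * overhead + MTU + tail-room alignment, rounded up with SKB_DATA_ALIGN().
 * The page-based frag allocator is only used when that size plus
 * NET_SKB_PAD still fits in a single page.  The helper below is
 * hypothetical and takes the driver's alignment constants as parameters
 * instead of assuming their values.
 */
static inline u32 bnx2x_rx_buf_size_sketch(u32 mtu, u32 align_start,
					   u32 ip_pad, u32 eth_overhead,
					   u32 align_end)
{
	return SKB_DATA_ALIGN(align_start + ip_pad + eth_overhead +
			      mtu + align_end);
}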
2032
2033 static int bnx2x_init_rss(struct bnx2x *bp)
2034 {
2035         int i;
2036         u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
2037
2038         /* Prepare the initial contents for the indirection table if RSS is
2039          * enabled
2040          */
2041         for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
2042                 bp->rss_conf_obj.ind_table[i] =
2043                         bp->fp->cl_id +
2044                         ethtool_rxfh_indir_default(i, num_eth_queues);
2045
2046         /*
2047          * For 57710 and 57711 SEARCHER configuration (rss_keys) is
2048  * per-port, so if explicit configuration is needed, do it only
2049          * for a PMF.
2050          *
2051          * For 57712 and newer on the other hand it's a per-function
2052          * configuration.
2053          */
2054         return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
2055 }
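
/*
 * Purely illustrative sketch, not part of the driver build: the default
 * indirection table built above spreads the table slots across the ETH
 * queues round-robin (ethtool_rxfh_indir_default() reduces to
 * slot % n_rx_rings), so each entry ends up as the client ID of queue
 * (slot % num_eth_queues).  The helper name is hypothetical.
 */
static inline u8 bnx2x_rss_ind_entry_sketch(u8 base_cl_id, u32 slot,
					    u8 num_eth_queues)
{
	return base_cl_id + (slot % num_eth_queues);
}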
2056
2057 int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
2058               bool config_hash, bool enable)
2059 {
2060         struct bnx2x_config_rss_params params = {NULL};
2061
2062         /* Although RSS is meaningless when there is a single HW queue, we
2063          * still need it enabled in order to have the HW Rx hash generated.
2064          *
2065          * if (!is_eth_multi(bp))
2066          *      bp->multi_mode = ETH_RSS_MODE_DISABLED;
2067          */
2068
2069         params.rss_obj = rss_obj;
2070
2071         __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
2072
2073         if (enable) {
2074                 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
2075
2076                 /* RSS configuration */
2077                 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
2078                 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
2079                 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
2080                 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
2081                 if (rss_obj->udp_rss_v4)
2082                         __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
2083                 if (rss_obj->udp_rss_v6)
2084                         __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
2085
2086                 if (!CHIP_IS_E1x(bp)) {
2087                         /* valid only for TUNN_MODE_VXLAN tunnel mode */
2088                         __set_bit(BNX2X_RSS_IPV4_VXLAN, &params.rss_flags);
2089                         __set_bit(BNX2X_RSS_IPV6_VXLAN, &params.rss_flags);
2090
2091                         /* valid only for TUNN_MODE_GRE tunnel mode */
2092                         __set_bit(BNX2X_RSS_TUNN_INNER_HDRS, &params.rss_flags);
2093                 }
2094         } else {
2095                 __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
2096         }
2097
2098         /* Hash bits */
2099         params.rss_result_mask = MULTI_MASK;
2100
2101         memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
2102
2103         if (config_hash) {
2104                 /* RSS keys */
2105                 netdev_rss_key_fill(params.rss_key, T_ETH_RSS_KEY * 4);
2106                 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
2107         }
2108
2109         if (IS_PF(bp))
2110                 return bnx2x_config_rss(bp, &params);
2111         else
2112                 return bnx2x_vfpf_config_rss(bp, &params);
2113 }
2114
2115 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
2116 {
2117         struct bnx2x_func_state_params func_params = {NULL};
2118
2119         /* Prepare parameters for function state transitions */
2120         __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2121
2122         func_params.f_obj = &bp->func_obj;
2123         func_params.cmd = BNX2X_F_CMD_HW_INIT;
2124
2125         func_params.params.hw_init.load_phase = load_code;
2126
2127         return bnx2x_func_state_change(bp, &func_params);
2128 }
2129
2130 /*
2131  * Cleans the objects that have internal lists without sending
2132  * ramrods. Should be run when interrupts are disabled.
2133  */
2134 void bnx2x_squeeze_objects(struct bnx2x *bp)
2135 {
2136         int rc;
2137         unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
2138         struct bnx2x_mcast_ramrod_params rparam = {NULL};
2139         struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
2140
2141         /***************** Cleanup MACs' object first *************************/
2142
2143         /* Wait for completion of the requested commands */
2144         __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2145         /* Perform a dry cleanup */
2146         __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2147
2148         /* Clean ETH primary MAC */
2149         __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
2150         rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
2151                                  &ramrod_flags);
2152         if (rc != 0)
2153                 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2154
2155         /* Cleanup UC list */
2156         vlan_mac_flags = 0;
2157         __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2158         rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2159                                  &ramrod_flags);
2160         if (rc != 0)
2161                 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2162
2163         /***************** Now clean mcast object *****************************/
2164         rparam.mcast_obj = &bp->mcast_obj;
2165         __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2166
2167         /* Add a DEL command... - Since we're doing a driver cleanup only,
2168          * we take a lock surrounding both the initial send and the CONTs,
2169          * as we don't want a true completion to disrupt us in the middle.
2170          */
2171         netif_addr_lock_bh(bp->dev);
2172         rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2173         if (rc < 0)
2174                 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2175                           rc);
2176
2177         /* ...and wait until all pending commands are cleared */
2178         rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2179         while (rc != 0) {
2180                 if (rc < 0) {
2181                         BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2182                                   rc);
2183                         netif_addr_unlock_bh(bp->dev);
2184                         return;
2185                 }
2186
2187                 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2188         }
2189         netif_addr_unlock_bh(bp->dev);
2190 }
2191
2192 #ifndef BNX2X_STOP_ON_ERROR
2193 #define LOAD_ERROR_EXIT(bp, label) \
2194         do { \
2195                 (bp)->state = BNX2X_STATE_ERROR; \
2196                 goto label; \
2197         } while (0)
2198
2199 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2200         do { \
2201                 bp->cnic_loaded = false; \
2202                 goto label; \
2203         } while (0)
2204 #else /*BNX2X_STOP_ON_ERROR*/
2205 #define LOAD_ERROR_EXIT(bp, label) \
2206         do { \
2207                 (bp)->state = BNX2X_STATE_ERROR; \
2208                 (bp)->panic = 1; \
2209                 return -EBUSY; \
2210         } while (0)
2211 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2212         do { \
2213                 bp->cnic_loaded = false; \
2214                 (bp)->panic = 1; \
2215                 return -EBUSY; \
2216         } while (0)
2217 #endif /*BNX2X_STOP_ON_ERROR*/
2218
2219 static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2220 {
2221         BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2222                        bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2223         return;
2224 }
2225
2226 static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
2227 {
2228         int num_groups, vf_headroom = 0;
2229         int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
2230
2231         /* number of queues for statistics is number of eth queues + FCoE */
2232         u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
2233
2234         /* Total number of FW statistics requests =
2235          * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2236          * and fcoe l2 queue) stats + num of queues (which includes another 1
2237          * for fcoe l2 queue if applicable)
2238          */
2239         bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
2240
2241         /* vf stats appear in the request list, but their data is allocated by
2242          * the VFs themselves. We don't include them in the bp->fw_stats_num as
2243          * it is used to determine where to place the vf stats queries in the
2244          * request struct
2245          */
2246         if (IS_SRIOV(bp))
2247                 vf_headroom = bnx2x_vf_headroom(bp);
2248
2249         /* Request is built from stats_query_header and an array of
2250          * stats_query_cmd_group each of which contains
2251          * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2252          * configured in the stats_query_header.
2253          */
2254         num_groups =
2255                 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2256                  (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
2257                  1 : 0));
2258
2259         DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2260            bp->fw_stats_num, vf_headroom, num_groups);
2261         bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2262                 num_groups * sizeof(struct stats_query_cmd_group);
2263
2264         /* Data for statistics requests + stats_counter
2265          * stats_counter holds per-STORM counters that are incremented
2266          * when STORM has finished with the current request.
2267          * Memory for FCoE offloaded statistics is counted anyway,
2268          * even if the FCoE statistics will not be sent.
2269          * VF stats are not accounted for here as the data of VF stats is stored
2270          * in memory allocated by the VF, not here.
2271          */
2272         bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2273                 sizeof(struct per_pf_stats) +
2274                 sizeof(struct fcoe_statistics_params) +
2275                 sizeof(struct per_queue_stats) * num_queue_stats +
2276                 sizeof(struct stats_counter);
2277
2278         bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping,
2279                                        bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2280         if (!bp->fw_stats)
2281                 goto alloc_mem_err;
2282
2283         /* Set shortcuts */
2284         bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2285         bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2286         bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2287                 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2288         bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2289                 bp->fw_stats_req_sz;
2290
2291         DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
2292            U64_HI(bp->fw_stats_req_mapping),
2293            U64_LO(bp->fw_stats_req_mapping));
2294         DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
2295            U64_HI(bp->fw_stats_data_mapping),
2296            U64_LO(bp->fw_stats_data_mapping));
2297         return 0;
2298
2299 alloc_mem_err:
2300         bnx2x_free_fw_stats_mem(bp);
2301         BNX2X_ERR("Can't allocate FW stats memory\n");
2302         return -ENOMEM;
2303 }
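
/*
 * Purely illustrative sketch, not part of the driver build: num_groups in
 * bnx2x_alloc_fw_stats_mem() above is simply a ceiling division of the
 * number of stats queries (driver queries plus VF head-room) by
 * STATS_QUERY_CMD_COUNT, and the single DMA block is laid out with the
 * request structure first and the data area right after it (hence the
 * two "shortcut" pointers).  The helper name is hypothetical.
 */
static inline int bnx2x_stats_query_groups_sketch(int num_queries,
						  int cmds_per_group)
{
	/* equivalent to DIV_ROUND_UP(num_queries, cmds_per_group) */
	return (num_queries + cmds_per_group - 1) / cmds_per_group;
}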
2304
2305 /* send load request to mcp and analyze response */
2306 static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2307 {
2308         u32 param;
2309
2310         /* init fw_seq */
2311         bp->fw_seq =
2312                 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2313                  DRV_MSG_SEQ_NUMBER_MASK);
2314         BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2315
2316         /* Get current FW pulse sequence */
2317         bp->fw_drv_pulse_wr_seq =
2318                 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2319                  DRV_PULSE_SEQ_MASK);
2320         BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2321
2322         param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2323
2324         if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2325                 param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2326
2327         /* load request */
2328         (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
2329
2330         /* if mcp fails to respond we must abort */
2331         if (!(*load_code)) {
2332                 BNX2X_ERR("MCP response failure, aborting\n");
2333                 return -EBUSY;
2334         }
2335
2336         /* If mcp refused (e.g. other port is in diagnostic mode) we
2337          * must abort
2338          */
2339         if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2340                 BNX2X_ERR("MCP refused load request, aborting\n");
2341                 return -EBUSY;
2342         }
2343         return 0;
2344 }
2345
2346 /* check whether another PF has already loaded FW to the chip. In
2347  * virtualized environments a PF from another VM may have already
2348  * initialized the device, including loading the FW
2349  */
2350 int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
2351 {
2352         /* is another pf loaded on this engine? */
2353         if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2354             load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2355                 /* build my FW version dword */
2356                 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2357                         (BCM_5710_FW_MINOR_VERSION << 8) +
2358                         (BCM_5710_FW_REVISION_VERSION << 16) +
2359                         (BCM_5710_FW_ENGINEERING_VERSION << 24);
2360
2361                 /* read loaded FW from chip */
2362                 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2363
2364                 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2365                    loaded_fw, my_fw);
2366
2367                 /* abort nic load if version mismatch */
2368                 if (my_fw != loaded_fw) {
2369                         if (print_err)
2370                                 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
2371                                           loaded_fw, my_fw);
2372                         else
2373                                 BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
2374                                                loaded_fw, my_fw);
2375                         return -EBUSY;
2376                 }
2377         }
2378         return 0;
2379 }
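
/*
 * Purely illustrative sketch, not part of the driver build: the FW
 * version dword compared above packs the major, minor, revision and
 * engineering numbers into the four bytes of a u32, major in the lowest
 * byte.  The helper name is hypothetical.
 */
static inline u32 bnx2x_pack_fw_ver_sketch(u8 major, u8 minor, u8 rev,
					   u8 eng)
{
	return (u32)major | ((u32)minor << 8) |
	       ((u32)rev << 16) | ((u32)eng << 24);
}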
2380
2381 /* returns the "mcp load_code" according to global load_count array */
2382 static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2383 {
2384         int path = BP_PATH(bp);
2385
2386         DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d]      %d, %d, %d\n",
2387            path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2388            bnx2x_load_count[path][2]);
2389         bnx2x_load_count[path][0]++;
2390         bnx2x_load_count[path][1 + port]++;
2391         DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d]  %d, %d, %d\n",
2392            path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2393            bnx2x_load_count[path][2]);
2394         if (bnx2x_load_count[path][0] == 1)
2395                 return FW_MSG_CODE_DRV_LOAD_COMMON;
2396         else if (bnx2x_load_count[path][1 + port] == 1)
2397                 return FW_MSG_CODE_DRV_LOAD_PORT;
2398         else
2399                 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2400 }
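
/*
 * Purely illustrative sketch, not part of the driver build: with no MCP
 * the load code is derived from the per-path counters incremented above.
 * The very first load on a path performs COMMON init, the first load on
 * a given port performs PORT init, and every later load is FUNCTION-only.
 * The enum and helper below are hypothetical stand-ins for the
 * FW_MSG_CODE_DRV_LOAD_* constants.
 */
enum bnx2x_load_level_sketch {
	LOAD_LEVEL_COMMON,	/* first load on this path */
	LOAD_LEVEL_PORT,	/* first load on this port */
	LOAD_LEVEL_FUNCTION,	/* everything else */
};

static inline enum bnx2x_load_level_sketch
bnx2x_no_mcp_load_level_sketch(int path_load_count, int port_load_count)
{
	if (path_load_count == 1)
		return LOAD_LEVEL_COMMON;
	if (port_load_count == 1)
		return LOAD_LEVEL_PORT;
	return LOAD_LEVEL_FUNCTION;
}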
2401
2402 /* mark PMF if applicable */
2403 static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2404 {
2405         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2406             (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2407             (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2408                 bp->port.pmf = 1;
2409                 /* We need the barrier to ensure the ordering between the
2410                  * writing to bp->port.pmf here and reading it from the
2411                  * bnx2x_periodic_task().
2412                  */
2413                 smp_mb();
2414         } else {
2415                 bp->port.pmf = 0;
2416         }
2417
2418         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2419 }
2420
2421 static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2422 {
2423         if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2424              (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2425             (bp->common.shmem2_base)) {
2426                 if (SHMEM2_HAS(bp, dcc_support))
2427                         SHMEM2_WR(bp, dcc_support,
2428                                   (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2429                                    SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2430                 if (SHMEM2_HAS(bp, afex_driver_support))
2431                         SHMEM2_WR(bp, afex_driver_support,
2432                                   SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2433         }
2434
2435         /* Set AFEX default VLAN tag to an invalid value */
2436         bp->afex_def_vlan_tag = -1;
2437 }
2438
2439 /**
2440  * bnx2x_bz_fp - zero content of the fastpath structure.
2441  *
2442  * @bp:         driver handle
2443  * @index:      fastpath index to be zeroed
2444  *
2445  * Makes sure the contents of bp->fp[index].napi are kept
2446  * intact.
2447  */
2448 static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2449 {
2450         struct bnx2x_fastpath *fp = &bp->fp[index];
2451         int cos;
2452         struct napi_struct orig_napi = fp->napi;
2453         struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
2454
2455         /* bzero bnx2x_fastpath contents */
2456         if (fp->tpa_info)
2457                 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2458                        sizeof(struct bnx2x_agg_info));
2459         memset(fp, 0, sizeof(*fp));
2460
2461         /* Restore the NAPI object as it has been already initialized */
2462         fp->napi = orig_napi;
2463         fp->tpa_info = orig_tpa_info;
2464         fp->bp = bp;
2465         fp->index = index;
2466         if (IS_ETH_FP(fp))
2467                 fp->max_cos = bp->max_cos;
2468         else
2469                 /* Special queues support only one CoS */
2470                 fp->max_cos = 1;
2471
2472         /* Init txdata pointers */
2473         if (IS_FCOE_FP(fp))
2474                 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
2475         if (IS_ETH_FP(fp))
2476                 for_each_cos_in_tx_queue(fp, cos)
2477                         fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2478                                 BNX2X_NUM_ETH_QUEUES(bp) + index];
2479
2480         /* set the tpa flag for each queue. The tpa flag determines the queue
2481          * minimal size so it must be set prior to queue memory allocation
2482          */
2483         if (bp->dev->features & NETIF_F_LRO)
2484                 fp->mode = TPA_MODE_LRO;
2485         else if (bp->dev->features & NETIF_F_GRO &&
2486                  bnx2x_mtu_allows_gro(bp->dev->mtu))
2487                 fp->mode = TPA_MODE_GRO;
2488         else
2489                 fp->mode = TPA_MODE_DISABLED;
2490
2491         /* We don't want TPA if it's disabled in bp
2492          * or if this is an FCoE L2 ring.
2493          */
2494         if (bp->disable_tpa || IS_FCOE_FP(fp))
2495                 fp->mode = TPA_MODE_DISABLED;
2496 }
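
/*
 * Purely illustrative sketch, not part of the driver build, of the
 * save/zero/restore pattern used by bnx2x_bz_fp() above: members that
 * must survive re-initialization (the NAPI context here) are copied
 * aside, the whole structure is zeroed, and the saved members are put
 * back.  The structure and helper below are hypothetical.
 */
struct bnx2x_fp_sketch {
	struct napi_struct napi;	/* initialized once, must survive */
	int index;
};

static inline void bnx2x_bz_fp_sketch(struct bnx2x_fp_sketch *fp, int index)
{
	struct napi_struct saved_napi = fp->napi;

	memset(fp, 0, sizeof(*fp));
	fp->napi = saved_napi;	/* restore the already-initialized member */
	fp->index = index;
}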
2497
2498 void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state)
2499 {
2500         u32 cur;
2501
2502         if (!IS_MF_BD(bp) || !SHMEM2_HAS(bp, os_driver_state) || IS_VF(bp))
2503                 return;
2504
2505         cur = SHMEM2_RD(bp, os_driver_state[BP_FW_MB_IDX(bp)]);
2506         DP(NETIF_MSG_IFUP, "Driver state %08x-->%08x\n",
2507            cur, state);
2508
2509         SHMEM2_WR(bp, os_driver_state[BP_FW_MB_IDX(bp)], state);
2510 }
2511
2512 int bnx2x_load_cnic(struct bnx2x *bp)
2513 {
2514         int i, rc, port = BP_PORT(bp);
2515
2516         DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2517
2518         mutex_init(&bp->cnic_mutex);
2519
2520         if (IS_PF(bp)) {
2521                 rc = bnx2x_alloc_mem_cnic(bp);
2522                 if (rc) {
2523                         BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2524                         LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2525                 }
2526         }
2527
2528         rc = bnx2x_alloc_fp_mem_cnic(bp);
2529         if (rc) {
2530                 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2531                 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2532         }
2533
2534         /* Update the number of queues with the cnic queues */
2535         rc = bnx2x_set_real_num_queues(bp, 1);
2536         if (rc) {
2537                 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2538                 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2539         }
2540
2541         /* Add all CNIC NAPI objects */
2542         bnx2x_add_all_napi_cnic(bp);
2543         DP(NETIF_MSG_IFUP, "cnic napi added\n");
2544         bnx2x_napi_enable_cnic(bp);
2545
2546         rc = bnx2x_init_hw_func_cnic(bp);
2547         if (rc)
2548                 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2549
2550         bnx2x_nic_init_cnic(bp);
2551
2552         if (IS_PF(bp)) {
2553                 /* Enable Timer scan */
2554                 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2555
2556                 /* setup cnic queues */
2557                 for_each_cnic_queue(bp, i) {
2558                         rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2559                         if (rc) {
2560                                 BNX2X_ERR("Queue setup failed\n");
2561                                 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2562                         }
2563                 }
2564         }
2565
2566         /* Initialize Rx filter. */
2567         bnx2x_set_rx_mode_inner(bp);
2568
2569         /* re-read iscsi info */
2570         bnx2x_get_iscsi_info(bp);
2571         bnx2x_setup_cnic_irq_info(bp);
2572         bnx2x_setup_cnic_info(bp);
2573         bp->cnic_loaded = true;
2574         if (bp->state == BNX2X_STATE_OPEN)
2575                 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2576
2577         DP(NETIF_MSG_IFUP, "CNIC-related load finished successfully\n");
2578
2579         return 0;
2580
2581 #ifndef BNX2X_STOP_ON_ERROR
2582 load_error_cnic2:
2583         /* Disable Timer scan */
2584         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2585
2586 load_error_cnic1:
2587         bnx2x_napi_disable_cnic(bp);
2588         /* Update the number of queues without the cnic queues */
2589         if (bnx2x_set_real_num_queues(bp, 0))
2590                 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2591 load_error_cnic0:
2592         BNX2X_ERR("CNIC-related load failed\n");
2593         bnx2x_free_fp_mem_cnic(bp);
2594         bnx2x_free_mem_cnic(bp);
2595         return rc;
2596 #endif /* ! BNX2X_STOP_ON_ERROR */
2597 }
2598
2599 /* must be called with rtnl_lock */
2600 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2601 {
2602         int port = BP_PORT(bp);
2603         int i, rc = 0, load_code = 0;
2604
2605         DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2606         DP(NETIF_MSG_IFUP,
2607            "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2608
2609 #ifdef BNX2X_STOP_ON_ERROR
2610         if (unlikely(bp->panic)) {
2611                 BNX2X_ERR("Can't load NIC when there is panic\n");
2612                 return -EPERM;
2613         }
2614 #endif
2615
2616         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2617
2618         /* zero the structure w/o any lock, before SP handler is initialized */
2619         memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2620         __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2621                 &bp->last_reported_link.link_report_flags);
2622
2623         if (IS_PF(bp))
2624                 /* must be called before memory allocation and HW init */
2625                 bnx2x_ilt_set_info(bp);
2626
2627         /*
2628          * Zero the fastpath structures while preserving invariants that are
2629          * set up only once: the napi objects, fp index, max_cos and bp pointer.
2630          * Also set fp->mode and txdata_ptr.
2631          */
2632         DP(NETIF_MSG_IFUP, "num queues: %d\n", bp->num_queues);
2633         for_each_queue(bp, i)
2634                 bnx2x_bz_fp(bp, i);
2635         memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2636                                   bp->num_cnic_queues) *
2637                                   sizeof(struct bnx2x_fp_txdata));
2638
2639         bp->fcoe_init = false;
2640
2641         /* Set the receive queues buffer size */
2642         bnx2x_set_rx_buf_size(bp);
2643
2644         if (IS_PF(bp)) {
2645                 rc = bnx2x_alloc_mem(bp);
2646                 if (rc) {
2647                         BNX2X_ERR("Unable to allocate bp memory\n");
2648                         return rc;
2649                 }
2650         }
2651
2652         /* This needs to be done after bnx2x_alloc_mem(), since it self-adjusts
2653          * to the amount of memory available for RSS queues
2654          */
2655         rc = bnx2x_alloc_fp_mem(bp);
2656         if (rc) {
2657                 BNX2X_ERR("Unable to allocate memory for fps\n");
2658                 LOAD_ERROR_EXIT(bp, load_error0);
2659         }
2660
2661         /* Allocate memory for FW statistics */
2662         if (bnx2x_alloc_fw_stats_mem(bp))
2663                 LOAD_ERROR_EXIT(bp, load_error0);
2664
2665         /* request pf to initialize status blocks */
2666         if (IS_VF(bp)) {
2667                 rc = bnx2x_vfpf_init(bp);
2668                 if (rc)
2669                         LOAD_ERROR_EXIT(bp, load_error0);
2670         }
2671
2672         /* Since bnx2x_alloc_mem() may update bp->num_queues,
2673          * bnx2x_set_real_num_queues() should always come after it.
2674          * At this stage cnic queues are not counted.
2675          */
2676         rc = bnx2x_set_real_num_queues(bp, 0);
2677         if (rc) {
2678                 BNX2X_ERR("Unable to set real_num_queues\n");
2679                 LOAD_ERROR_EXIT(bp, load_error0);
2680         }
2681
2682         /* Configure multi-CoS mappings in the kernel.
2683          * This configuration may be overridden by a multi-class queueing
2684          * discipline or by a DCBX negotiation result.
2685          */
2686         bnx2x_setup_tc(bp->dev, bp->max_cos);
2687
2688         /* Add all NAPI objects */
2689         bnx2x_add_all_napi(bp);
2690         DP(NETIF_MSG_IFUP, "napi added\n");
2691         bnx2x_napi_enable(bp);
2692
2693         if (IS_PF(bp)) {
2694                 /* set pf load just before approaching the MCP */
2695                 bnx2x_set_pf_load(bp);
2696
2697                 /* if mcp exists send load request and analyze response */
2698                 if (!BP_NOMCP(bp)) {
2699                         /* attempt to load pf */
2700                         rc = bnx2x_nic_load_request(bp, &load_code);
2701                         if (rc)
2702                                 LOAD_ERROR_EXIT(bp, load_error1);
2703
2704                         /* what did mcp say? */
2705                         rc = bnx2x_compare_fw_ver(bp, load_code, true);
2706                         if (rc) {
2707                                 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2708                                 LOAD_ERROR_EXIT(bp, load_error2);
2709                         }
2710                 } else {
2711                         load_code = bnx2x_nic_load_no_mcp(bp, port);
2712                 }
2713
2714                 /* mark pmf if applicable */
2715                 bnx2x_nic_load_pmf(bp, load_code);
2716
2717                 /* Init Function state controlling object */
2718                 bnx2x__init_func_obj(bp);
2719
2720                 /* Initialize HW */
2721                 rc = bnx2x_init_hw(bp, load_code);
2722                 if (rc) {
2723                         BNX2X_ERR("HW init failed, aborting\n");
2724                         bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2725                         LOAD_ERROR_EXIT(bp, load_error2);
2726                 }
2727         }
2728
2729         bnx2x_pre_irq_nic_init(bp);
2730
2731         /* Connect to IRQs */
2732         rc = bnx2x_setup_irqs(bp);
2733         if (rc) {
2734                 BNX2X_ERR("setup irqs failed\n");
2735                 if (IS_PF(bp))
2736                         bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2737                 LOAD_ERROR_EXIT(bp, load_error2);
2738         }
2739
2740         /* Init per-function objects */
2741         if (IS_PF(bp)) {
2742                 /* Setup NIC internals and enable interrupts */
2743                 bnx2x_post_irq_nic_init(bp, load_code);
2744
2745                 bnx2x_init_bp_objs(bp);
2746                 bnx2x_iov_nic_init(bp);
2747
2748                 /* Set AFEX default VLAN tag to an invalid value */
2749                 bp->afex_def_vlan_tag = -1;
2750                 bnx2x_nic_load_afex_dcc(bp, load_code);
2751                 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2752                 rc = bnx2x_func_start(bp);
2753                 if (rc) {
2754                         BNX2X_ERR("Function start failed!\n");
2755                         bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2756
2757                         LOAD_ERROR_EXIT(bp, load_error3);
2758                 }
2759
2760                 /* Send LOAD_DONE command to MCP */
2761                 if (!BP_NOMCP(bp)) {
2762                         load_code = bnx2x_fw_command(bp,
2763                                                      DRV_MSG_CODE_LOAD_DONE, 0);
2764                         if (!load_code) {
2765                                 BNX2X_ERR("MCP response failure, aborting\n");
2766                                 rc = -EBUSY;
2767                                 LOAD_ERROR_EXIT(bp, load_error3);
2768                         }
2769                 }
2770
2771                 /* initialize FW coalescing state machines in RAM */
2772                 bnx2x_update_coalesce(bp);
2773         }
2774
2775         /* setup the leading queue */
2776         rc = bnx2x_setup_leading(bp);
2777         if (rc) {
2778                 BNX2X_ERR("Setup leading failed!\n");
2779                 LOAD_ERROR_EXIT(bp, load_error3);
2780         }
2781
2782         /* set up the rest of the queues */
2783         for_each_nondefault_eth_queue(bp, i) {
2784                 if (IS_PF(bp))
2785                         rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
2786                 else /* VF */
2787                         rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
2788                 if (rc) {
2789                         BNX2X_ERR("Queue %d setup failed\n", i);
2790                         LOAD_ERROR_EXIT(bp, load_error3);
2791                 }
2792         }
2793
2794         /* setup rss */
2795         rc = bnx2x_init_rss(bp);
2796         if (rc) {
2797                 BNX2X_ERR("PF RSS init failed\n");
2798                 LOAD_ERROR_EXIT(bp, load_error3);
2799         }
2800
2801         /* Now that the clients are configured we are ready to work */
2802         bp->state = BNX2X_STATE_OPEN;
2803
2804         /* Configure a ucast MAC */
2805         if (IS_PF(bp))
2806                 rc = bnx2x_set_eth_mac(bp, true);
2807         else /* vf */
2808                 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2809                                            true);
2810         if (rc) {
2811                 BNX2X_ERR("Setting Ethernet MAC failed\n");
2812                 LOAD_ERROR_EXIT(bp, load_error3);
2813         }
2814
2815         if (IS_PF(bp) && bp->pending_max) {
2816                 bnx2x_update_max_mf_config(bp, bp->pending_max);
2817                 bp->pending_max = 0;
2818         }
2819
2820         if (bp->port.pmf) {
2821                 rc = bnx2x_initial_phy_init(bp, load_mode);
2822                 if (rc)
2823                         LOAD_ERROR_EXIT(bp, load_error3);
2824         }
2825         bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
2826
2827         /* Start fast path */
2828
2829         /* Re-configure vlan filters */
2830         rc = bnx2x_vlan_reconfigure_vid(bp);
2831         if (rc)
2832                 LOAD_ERROR_EXIT(bp, load_error3);
2833
2834         /* Initialize Rx filter. */
2835         bnx2x_set_rx_mode_inner(bp);
2836
2837         if (bp->flags & PTP_SUPPORTED) {
2838                 bnx2x_init_ptp(bp);
2839                 bnx2x_configure_ptp_filters(bp);
2840         }
2841         /* Start Tx */
2842         switch (load_mode) {
2843         case LOAD_NORMAL:
2844                 /* Tx queues should only be re-enabled */
2845                 netif_tx_wake_all_queues(bp->dev);
2846                 break;
2847
2848         case LOAD_OPEN:
2849                 netif_tx_start_all_queues(bp->dev);
2850                 smp_mb__after_atomic();
2851                 break;
2852
2853         case LOAD_DIAG:
2854         case LOAD_LOOPBACK_EXT:
2855                 bp->state = BNX2X_STATE_DIAG;
2856                 break;
2857
2858         default:
2859                 break;
2860         }
2861
2862         if (bp->port.pmf)
2863                 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
2864         else
2865                 bnx2x__link_status_update(bp);
2866
2867         /* start the timer */
2868         mod_timer(&bp->timer, jiffies + bp->current_interval);
2869
2870         if (CNIC_ENABLED(bp))
2871                 bnx2x_load_cnic(bp);
2872
2873         if (IS_PF(bp))
2874                 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
2875
2876         if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2877                 /* mark that the driver is loaded in shmem2 */
2878                 u32 val;
2879                 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2880                 val &= ~DRV_FLAGS_MTU_MASK;
2881                 val |= (bp->dev->mtu << DRV_FLAGS_MTU_SHIFT);
2882                 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2883                           val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2884                           DRV_FLAGS_CAPABILITIES_LOADED_L2);
2885         }
2886
2887         /* Wait for all pending SP commands to complete */
2888         if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2889                 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2890                 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
2891                 return -EBUSY;
2892         }
2893
2894         /* Update driver data for On-Chip MFW dump. */
2895         if (IS_PF(bp))
2896                 bnx2x_update_mfw_dump(bp);
2897
2898         /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2899         if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2900                 bnx2x_dcbx_init(bp, false);
2901
2902         if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
2903                 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_ACTIVE);
2904
2905         DP(NETIF_MSG_IFUP, "Ending NIC load successfully\n");
2906
2907         return 0;
2908
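/* Error handling below: when BNX2X_STOP_ON_ERROR is not set, LOAD_ERROR_EXIT()
 * jumps to the label matching the last stage that completed, and the labels
 * fall through so each one undoes its own stage and then everything set up
 * before it.
 */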
2909 #ifndef BNX2X_STOP_ON_ERROR
2910 load_error3:
2911         if (IS_PF(bp)) {
2912                 bnx2x_int_disable_sync(bp, 1);
2913
2914                 /* Clean queueable objects */
2915                 bnx2x_squeeze_objects(bp);
2916         }
2917
2918         /* Free SKBs, SGEs, TPA pool and driver internals */
2919         bnx2x_free_skbs(bp);
2920         for_each_rx_queue(bp, i)
2921                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2922
2923         /* Release IRQs */
2924         bnx2x_free_irq(bp);
2925 load_error2:
2926         if (IS_PF(bp) && !BP_NOMCP(bp)) {
2927                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2928                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2929         }
2930
2931         bp->port.pmf = 0;
2932 load_error1:
2933         bnx2x_napi_disable(bp);
2934         bnx2x_del_all_napi(bp);
2935
2936         /* clear pf_load status, as it was already set */
2937         if (IS_PF(bp))
2938                 bnx2x_clear_pf_load(bp);
2939 load_error0:
2940         bnx2x_free_fw_stats_mem(bp);
2941         bnx2x_free_fp_mem(bp);
2942         bnx2x_free_mem(bp);
2943
2944         return rc;
2945 #endif /* ! BNX2X_STOP_ON_ERROR */
2946 }
2947
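/**
 * bnx2x_drain_tx_queues - wait for all Tx fastpath tasks to complete.
 *
 * @bp:         driver handle
 *
 * Walks every COS txdata of every Tx queue and waits for the consumer to
 * catch up with the producer, returning the non-zero status of
 * bnx2x_clean_tx_queue() if a queue fails to drain.
 */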
2948 int bnx2x_drain_tx_queues(struct bnx2x *bp)
2949 {
2950         int rc = 0, cos, i;
2951
2952         /* Wait until tx fastpath tasks complete */
2953         for_each_tx_queue(bp, i) {
2954                 struct bnx2x_fastpath *fp = &bp->fp[i];
2955
2956                 for_each_cos_in_tx_queue(fp, cos)
2957                         rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2958                 if (rc)
2959                         return rc;
2960         }
2961         return 0;
2962 }
2963
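/**
 * bnx2x_nic_unload - bring the NIC down.
 *
 * @bp:          driver handle
 * @unload_mode: UNLOAD_NORMAL, UNLOAD_CLOSE or UNLOAD_RECOVERY
 * @keep_link:   whether to ask the MCP to keep the link up across the unload
 *
 * Stops Tx, drains the queues (unless recovering), cleans up the chip or
 * notifies the PF (for a VF), releases IRQs and driver memory, and leaves
 * the function in BNX2X_STATE_CLOSED.
 */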
2964 /* must be called with rtnl_lock */
2965 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2966 {
2967         int i;
2968         bool global = false;
2969
2970         DP(NETIF_MSG_IFDOWN, "Starting NIC unload\n");
2971
2972         if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
2973                 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
2974
2975         /* mark that the driver is unloaded in shmem2 */
2976         if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2977                 u32 val;
2978                 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2979                 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2980                           val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2981         }
2982
2983         if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
2984             (bp->state == BNX2X_STATE_CLOSED ||
2985              bp->state == BNX2X_STATE_ERROR)) {
2986                 /* We can get here if the driver has been unloaded
2987                  * during parity error recovery and is either waiting for a
2988                  * leader to complete or for other functions to unload and
2989                  * then ifdown has been issued. In this case we want to
2990                  * unload and let the other functions complete the recovery
2991                  * process.
2992                  */
2993                 bp->recovery_state = BNX2X_RECOVERY_DONE;
2994                 bp->is_leader = 0;
2995                 bnx2x_release_leader_lock(bp);
2996                 smp_mb();
2997
2998                 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2999                 BNX2X_ERR("Can't unload in closed or error state\n");
3000                 return -EINVAL;
3001         }
3002
3003         /* Nothing to do during unload if previous bnx2x_nic_load()
3004          * has not completed successfully - all resources are released.
3005          *
3006          * We can get here only after an unsuccessful ndo_* callback, during which
3007          * dev->IFF_UP flag is still on.
3008          */
3009         if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
3010                 return 0;
3011
3012         /* It's important to set bp->state to a value different from
3013          * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
3014          * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
3015          */
3016         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
3017         smp_mb();
3018
3019         /* indicate to VFs that the PF is going down */
3020         bnx2x_iov_channel_down(bp);
3021
3022         if (CNIC_LOADED(bp))
3023                 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
3024
3025         /* Stop Tx */
3026         bnx2x_tx_disable(bp);
3027         netdev_reset_tc(bp->dev);
3028
3029         bp->rx_mode = BNX2X_RX_MODE_NONE;
3030
3031         del_timer_sync(&bp->timer);
3032
3033         if (IS_PF(bp)) {
3034                 /* Set ALWAYS_ALIVE bit in shmem */
3035                 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
3036                 bnx2x_drv_pulse(bp);
3037                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
3038                 bnx2x_save_statistics(bp);
3039         }
3040
3041         /* Wait until consumers catch up with producers in all queues.
3042          * If we're recovering, the FW can't write to the host, so there is
3043          * no reason to wait for the queues to complete all Tx.
3044          */
3045         if (unload_mode != UNLOAD_RECOVERY)
3046                 bnx2x_drain_tx_queues(bp);
3047
3048         /* if VF, indicate to the PF that this function is going down
3049          * (the PF will delete SP elements and clear initializations)
3050          */
3051         if (IS_VF(bp))
3052                 bnx2x_vfpf_close_vf(bp);
3053         else if (unload_mode != UNLOAD_RECOVERY)
3054                 /* if this is a normal/close unload, need to clean up the chip */
3055                 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
3056         else {
3057                 /* Send the UNLOAD_REQUEST to the MCP */
3058                 bnx2x_send_unload_req(bp, unload_mode);
3059
3060                 /* Prevent transactions to the host from the functions on the
3061                  * engine that doesn't reset global blocks in case of global
3062                  * attention once global blocks are reset and gates are opened
3063                  * (the engine whose leader will perform the recovery
3064                  * last).
3065                  */
3066                 if (!CHIP_IS_E1x(bp))
3067                         bnx2x_pf_disable(bp);
3068
3069                 /* Disable HW interrupts, NAPI */
3070                 bnx2x_netif_stop(bp, 1);
3071                 /* Delete all NAPI objects */
3072                 bnx2x_del_all_napi(bp);
3073                 if (CNIC_LOADED(bp))
3074                         bnx2x_del_all_napi_cnic(bp);
3075                 /* Release IRQs */
3076                 bnx2x_free_irq(bp);
3077
3078                 /* Report UNLOAD_DONE to MCP */
3079                 bnx2x_send_unload_done(bp, false);
3080         }
3081
3082         /*
3083          * At this stage no more interrupts will arrive so we may safely clean
3084          * the queueable objects here in case they failed to get cleaned so far.
3085          */
3086         if (IS_PF(bp))
3087                 bnx2x_squeeze_objects(bp);
3088
3089         /* There should be no more pending SP commands at this stage */
3090         bp->sp_state = 0;
3091
3092         bp->port.pmf = 0;
3093
3094         /* clear pending work in rtnl task */
3095         bp->sp_rtnl_state = 0;
3096         smp_mb();
3097
3098         /* Free SKBs, SGEs, TPA pool and driver internals */
3099         bnx2x_free_skbs(bp);
3100         if (CNIC_LOADED(bp))
3101                 bnx2x_free_skbs_cnic(bp);
3102         for_each_rx_queue(bp, i)
3103                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
3104
3105         bnx2x_free_fp_mem(bp);
3106         if (CNIC_LOADED(bp))
3107                 bnx2x_free_fp_mem_cnic(bp);
3108
3109         if (IS_PF(bp)) {
3110                 if (CNIC_LOADED(bp))
3111                         bnx2x_free_mem_cnic(bp);
3112         }
3113         bnx2x_free_mem(bp);
3114
3115         bp->state = BNX2X_STATE_CLOSED;
3116         bp->cnic_loaded = false;
3117
3118         /* Clear driver version indication in shmem */
3119         if (IS_PF(bp))
3120                 bnx2x_update_mng_version(bp);
3121
3122         /* Check if there are pending parity attentions. If there are - set
3123          * RECOVERY_IN_PROGRESS.
3124          */
3125         if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
3126                 bnx2x_set_reset_in_progress(bp);
3127
3128                 /* Set RESET_IS_GLOBAL if needed */
3129                 if (global)
3130                         bnx2x_set_reset_global(bp);
3131         }
3132
3133         /* The last driver must disable the "close the gate" functionality if
3134          * there is no parity attention or "process kill" pending.
3135          */
3136         if (IS_PF(bp) &&
3137             !bnx2x_clear_pf_load(bp) &&
3138             bnx2x_reset_is_done(bp, BP_PATH(bp)))
3139                 bnx2x_disable_close_the_gate(bp);
3140
3141         DP(NETIF_MSG_IFDOWN, "Ending NIC unload\n");
3142
3143         return 0;
3144 }
3145
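/**
 * bnx2x_set_power_state - set the PCI power state of the device.
 *
 * @bp:         driver handle
 * @state:      PCI_D0 or PCI_D3hot
 *
 * Programs the PM control register directly. Transitions to D3hot are
 * skipped while other functions still hold the device enabled or on
 * emulation/FPGA platforms, and PME is armed when WoL is configured.
 */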
3146 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
3147 {
3148         u16 pmcsr;
3149
3150         /* If there is no power capability, silently succeed */
3151         if (!bp->pdev->pm_cap) {
3152                 BNX2X_DEV_INFO("No power capability. Breaking.\n");
3153                 return 0;
3154         }
3155
3156         pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
3157
3158         switch (state) {
3159         case PCI_D0:
3160                 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3161                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3162                                        PCI_PM_CTRL_PME_STATUS));
3163
3164                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3165                         /* delay required during transition out of D3hot */
3166                         msleep(20);
3167                 break;
3168
3169         case PCI_D3hot:
3170                 /* If there are other clients above, don't
3171                  * shut down the power */
3172                 if (atomic_read(&bp->pdev->enable_cnt) != 1)
3173                         return 0;
3174                 /* Don't shut down the power for emulation and FPGA */
3175                 if (CHIP_REV_IS_SLOW(bp))
3176                         return 0;
3177
3178                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3179                 pmcsr |= 3;     /* D3hot */
3180
3181                 if (bp->wol)
3182                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3183
3184                 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3185                                       pmcsr);
3186
3187                 /* No more memory access after this point until
3188                  * the device is brought back to D0.
3189                  */
3190                 break;
3191
3192         default:
3193                 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
3194                 return -EINVAL;
3195         }
3196         return 0;
3197 }
3198
3199 /*
3200  * net_device service functions
3201  */
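/**
 * bnx2x_poll - NAPI poll handler for a fastpath ring.
 *
 * @napi:       NAPI context embedded in the fastpath
 * @budget:     maximum number of Rx packets to process
 *
 * Services Tx completions on every COS, handles up to @budget Rx packets,
 * and only re-enables the status block interrupt through the IGU once no
 * further Rx/Tx work is pending and NAPI has actually completed.
 */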
3202 static int bnx2x_poll(struct napi_struct *napi, int budget)
3203 {
3204         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3205                                                  napi);
3206         struct bnx2x *bp = fp->bp;
3207         int rx_work_done;
3208         u8 cos;
3209
3210 #ifdef BNX2X_STOP_ON_ERROR
3211         if (unlikely(bp->panic)) {
3212                 napi_complete(napi);
3213                 return 0;
3214         }
3215 #endif
3216         for_each_cos_in_tx_queue(fp, cos)
3217                 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3218                         bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
3219
3220         rx_work_done = (bnx2x_has_rx_work(fp)) ? bnx2x_rx_int(fp, budget) : 0;
3221
3222         if (rx_work_done < budget) {
3223                 /* No need to update SB for FCoE L2 ring as long as
3224                  * it's connected to the default SB and the SB
3225                  * has been updated when NAPI was scheduled.
3226                  */
3227                 if (IS_FCOE_FP(fp)) {
3228                         napi_complete_done(napi, rx_work_done);
3229                 } else {
3230                         bnx2x_update_fpsb_idx(fp);
3231                         /* bnx2x_has_rx_work() reads the status block,
3232                          * thus we need to ensure that status block indices
3233                          * have been actually read (bnx2x_update_fpsb_idx)
3234                          * prior to this check (bnx2x_has_rx_work) so that
3235                          * we won't write the "newer" value of the status block
3236                          * to IGU (if there was a DMA right after
3237                          * bnx2x_has_rx_work and if there is no rmb, the memory
3238                          * reading (bnx2x_update_fpsb_idx) may be postponed
3239                          * to right before bnx2x_ack_sb). In this case there
3240                          * will never be another interrupt until there is
3241                          * another update of the status block, while there
3242                          * is still unhandled work.
3243                          */
3244                         rmb();
3245
3246                         if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3247                                 if (napi_complete_done(napi, rx_work_done)) {
3248                                         /* Re-enable interrupts */
3249                                         DP(NETIF_MSG_RX_STATUS,
3250                                            "Update index to %d\n", fp->fp_hc_idx);
3251                                         bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3252                                                      le16_to_cpu(fp->fp_hc_idx),
3253                                                      IGU_INT_ENABLE, 1);
3254                                 }
3255                         } else {
3256                                 rx_work_done = budget;
3257                         }
3258                 }
3259         }
3260
3261         return rx_work_done;
3262 }
3263
3264 /* we split the first BD into headers and data BDs
3265  * to ease the pain of our fellow microcode engineers
3266  * we use one mapping for both BDs
3267  */
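/* The start BD keeps its original DMA address but its length is trimmed to
 * hlen; a new regular BD is then taken (right after the parsing BD) that
 * points hlen bytes further into the same mapping and carries the remaining
 * (old length - hlen) bytes of the linear data.
 */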
3268 static u16 bnx2x_tx_split(struct bnx2x *bp,
3269                           struct bnx2x_fp_txdata *txdata,
3270                           struct sw_tx_bd *tx_buf,
3271                           struct eth_tx_start_bd **tx_bd, u16 hlen,
3272                           u16 bd_prod)
3273 {
3274         struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3275         struct eth_tx_bd *d_tx_bd;
3276         dma_addr_t mapping;
3277         int old_len = le16_to_cpu(h_tx_bd->nbytes);
3278
3279         /* first fix first BD */
3280         h_tx_bd->nbytes = cpu_to_le16(hlen);
3281
3282         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3283            h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
3284
3285         /* now get a new data BD
3286          * (after the pbd) and fill it */
3287         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3288         d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3289
3290         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3291                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3292
3293         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3294         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3295         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3296
3297         /* this marks the BD as one that has no individual mapping */
3298         tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3299
3300         DP(NETIF_MSG_TX_QUEUED,
3301            "TSO split data size is %d (%x:%x)\n",
3302            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3303
3304         /* update tx_bd */
3305         *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3306
3307         return bd_prod;
3308 }
3309
3310 #define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3311 #define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
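/* Adjust a partially computed checksum whose coverage starts at the wrong
 * offset: a positive @fix subtracts the contribution of the @fix bytes that
 * precede @t_header, a negative @fix adds the -@fix bytes starting at
 * @t_header, and the folded result is byte-swapped for the parsing BD.
 */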
3312 static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
3313 {
3314         __sum16 tsum = (__force __sum16) csum;
3315
3316         if (fix > 0)
3317                 tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3318                                   csum_partial(t_header - fix, fix, 0)));
3319
3320         else if (fix < 0)
3321                 tsum = ~csum_fold(csum_add((__force __wsum) csum,
3322                                   csum_partial(t_header, -fix, 0)));
3323
3324         return bswab16(tsum);
3325 }
3326
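/* Classify an skb for transmit: returns a bitmask of XMIT_* flags describing
 * the L3 protocol, the L4 checksum offload needed, tunnel encapsulation and
 * the GSO type; e.g. a TCP/IPv4 skb with CHECKSUM_PARTIAL yields
 * XMIT_CSUM_V4 | XMIT_CSUM_TCP. The rest of the xmit path keys off these
 * flags when building the BDs.
 */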
3327 static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
3328 {
3329         u32 rc;
3330         __u8 prot = 0;
3331         __be16 protocol;
3332
3333         if (skb->ip_summed != CHECKSUM_PARTIAL)
3334                 return XMIT_PLAIN;
3335
3336         protocol = vlan_get_protocol(skb);
3337         if (protocol == htons(ETH_P_IPV6)) {
3338                 rc = XMIT_CSUM_V6;
3339                 prot = ipv6_hdr(skb)->nexthdr;
3340         } else {
3341                 rc = XMIT_CSUM_V4;
3342                 prot = ip_hdr(skb)->protocol;
3343         }
3344
3345         if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3346                 if (inner_ip_hdr(skb)->version == 6) {
3347                         rc |= XMIT_CSUM_ENC_V6;
3348                         if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3349                                 rc |= XMIT_CSUM_TCP;
3350                 } else {
3351                         rc |= XMIT_CSUM_ENC_V4;
3352                         if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
3353                                 rc |= XMIT_CSUM_TCP;
3354                 }
3355         }
3356         if (prot == IPPROTO_TCP)
3357                 rc |= XMIT_CSUM_TCP;
3358
3359         if (skb_is_gso(skb)) {
3360                 if (skb_is_gso_v6(skb)) {
3361                         rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
3362                         if (rc & XMIT_CSUM_ENC)
3363                                 rc |= XMIT_GSO_ENC_V6;
3364                 } else {
3365                         rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
3366                         if (rc & XMIT_CSUM_ENC)
3367                                 rc |= XMIT_GSO_ENC_V4;
3368                 }
3369         }
3370
3371         return rc;
3372 }
3373
3374 /* VXLAN: 4 = 1 (for linear data BD) + 3 (2 for PBD and last BD) */
3375 #define BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS         4
3376
3377 /* Regular: 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3378 #define BNX2X_NUM_TSO_WIN_SUB_BDS               3
3379
3380 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3381 /* check if packet requires linearization (packet is too fragmented)
3382    no need to check fragmentation if page size > 8K (there will be no
3383    violation of FW restrictions) */
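/* The check below slides a window of (MAX_FETCH_BD - num_tso_win_sub) BDs
 * over the fragments and requires every window to cover at least one MSS of
 * payload; e.g. with a window of 10 frags and gso_size 1400, ten consecutive
 * 100-byte frags (1000 bytes) would force linearization.
 */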
3384 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3385                              u32 xmit_type)
3386 {
3387         int first_bd_sz = 0, num_tso_win_sub = BNX2X_NUM_TSO_WIN_SUB_BDS;
3388         int to_copy = 0, hlen = 0;
3389
3390         if (xmit_type & XMIT_GSO_ENC)
3391                 num_tso_win_sub = BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS;
3392
3393         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - num_tso_win_sub)) {
3394                 if (xmit_type & XMIT_GSO) {
3395                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3396                         int wnd_size = MAX_FETCH_BD - num_tso_win_sub;
3397                         /* Number of windows to check */
3398                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3399                         int wnd_idx = 0;
3400                         int frag_idx = 0;
3401                         u32 wnd_sum = 0;
3402
3403                         /* Headers length */
3404                         if (xmit_type & XMIT_GSO_ENC)
3405                                 hlen = (int)(skb_inner_transport_header(skb) -
3406                                              skb->data) +
3407                                              inner_tcp_hdrlen(skb);
3408                         else
3409                                 hlen = (int)(skb_transport_header(skb) -
3410                                              skb->data) + tcp_hdrlen(skb);
3411
3412                         /* Amount of data (w/o headers) on the linear part of the SKB */
3413                         first_bd_sz = skb_headlen(skb) - hlen;
3414
3415                         wnd_sum  = first_bd_sz;
3416
3417                         /* Calculate the first sum - it's special */
3418                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3419                                 wnd_sum +=
3420                                         skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
3421
3422                         /* If there was data in the linear part of the skb - check it */
3423                         if (first_bd_sz > 0) {
3424                                 if (unlikely(wnd_sum < lso_mss)) {
3425                                         to_copy = 1;
3426                                         goto exit_lbl;
3427                                 }
3428
3429                                 wnd_sum -= first_bd_sz;
3430                         }
3431
3432                         /* Others are easier: run through the frag list and
3433                            check all windows */
3434                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3435                                 wnd_sum +=
3436                           skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
3437
3438                                 if (unlikely(wnd_sum < lso_mss)) {
3439                                         to_copy = 1;
3440                                         break;
3441                                 }
3442                                 wnd_sum -=
3443                                         skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
3444                         }
3445                 } else {
3446                         /* in the non-LSO case, a too fragmented packet
3447                            should always be linearized */
3448                         to_copy = 1;
3449                 }
3450         }
3451
3452 exit_lbl:
3453         if (unlikely(to_copy))
3454                 DP(NETIF_MSG_TX_QUEUED,
3455                    "Linearization IS REQUIRED for %s packet. num_frags %d  hlen %d  first_bd_sz %d\n",
3456                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3457                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3458
3459         return to_copy;
3460 }
3461 #endif
3462
3463 /**
3464  * bnx2x_set_pbd_gso - update PBD in GSO case.
3465  *
3466  * @skb:        packet skb
3467  * @pbd:        parse BD
3468  * @xmit_type:  xmit flags
3469  */
3470 static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3471                               struct eth_tx_parse_bd_e1x *pbd,
3472                               u32 xmit_type)
3473 {
3474         pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
3475         pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
3476         pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
3477
3478         if (xmit_type & XMIT_GSO_V4) {
3479                 pbd->ip_id = bswab16(ip_hdr(skb)->id);
3480                 pbd->tcp_pseudo_csum =
3481                         bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3482                                                    ip_hdr(skb)->daddr,
3483                                                    0, IPPROTO_TCP, 0));
3484         } else {
3485                 pbd->tcp_pseudo_csum =
3486                         bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3487                                                  &ipv6_hdr(skb)->daddr,
3488                                                  0, IPPROTO_TCP, 0));
3489         }
3490
3491         pbd->global_data |=
3492                 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
3493 }
3494
3495 /**
3496  * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3497  *
3498  * @bp:                 driver handle
3499  * @skb:                packet skb
3500  * @parsing_data:       data to be updated
3501  * @xmit_type:          xmit flags
3502  *
3503  * 57712/578xx related, when skb has encapsulation
3504  */
3505 static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3506                                  u32 *parsing_data, u32 xmit_type)
3507 {
3508         *parsing_data |=
3509                 ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3510                 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3511                 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3512
3513         if (xmit_type & XMIT_CSUM_TCP) {
3514                 *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3515                         ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3516                         ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3517
3518                 return skb_inner_transport_header(skb) +
3519                         inner_tcp_hdrlen(skb) - skb->data;
3520         }
3521
3522         /* We support checksum offload for TCP and UDP only.
3523          * No need to pass the UDP header length - it's a constant.
3524          */
3525         return skb_inner_transport_header(skb) +
3526                 sizeof(struct udphdr) - skb->data;
3527 }
3528
3529 /**
3530  * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
3531  *
3532  * @bp:                 driver handle
3533  * @skb:                packet skb
3534  * @parsing_data:       data to be updated
3535  * @xmit_type:          xmit flags
3536  *
3537  * 57712/578xx related
3538  */
3539 static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3540                                 u32 *parsing_data, u32 xmit_type)
3541 {
3542         *parsing_data |=
3543                 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
3544                 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3545                 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3546
3547         if (xmit_type & XMIT_CSUM_TCP) {
3548                 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3549                         ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3550                         ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3551
3552                 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
3553         }
3554         /* We support checksum offload for TCP and UDP only.
3555          * No need to pass the UDP header length - it's a constant.
3556          */
3557         return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
3558 }
3559
3560 /* set FW indication according to inner or outer protocols if tunneled */
3561 static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3562                                struct eth_tx_start_bd *tx_start_bd,
3563                                u32 xmit_type)
3564 {
3565         tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3566
3567         if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
3568                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
3569
3570         if (!(xmit_type & XMIT_CSUM_TCP))
3571                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
3572 }
3573
3574 /**
3575  * bnx2x_set_pbd_csum - update PBD with checksum and return header length
3576  *
3577  * @bp:         driver handle
3578  * @skb:        packet skb
3579  * @pbd:        parse BD to be updated
3580  * @xmit_type:  xmit flags
3581  */
3582 static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3583                              struct eth_tx_parse_bd_e1x *pbd,
3584                              u32 xmit_type)
3585 {
3586         u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
3587
3588         /* for now NS flag is not used in Linux */
3589         pbd->global_data =
3590                 cpu_to_le16(hlen |
3591                             ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3592                              ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
3593
3594         pbd->ip_hlen_w = (skb_transport_header(skb) -
3595                         skb_network_header(skb)) >> 1;
3596
3597         hlen += pbd->ip_hlen_w;
3598
3599         /* We support checksum offload for TCP and UDP only */
3600         if (xmit_type & XMIT_CSUM_TCP)
3601                 hlen += tcp_hdrlen(skb) / 2;
3602         else
3603                 hlen += sizeof(struct udphdr) / 2;
3604
3605         pbd->total_hlen_w = cpu_to_le16(hlen);
3606         hlen = hlen * 2;
3607
3608         if (xmit_type & XMIT_CSUM_TCP) {
3609                 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
3610
3611         } else {
3612                 s8 fix = SKB_CS_OFF(skb); /* signed! */
3613
3614                 DP(NETIF_MSG_TX_QUEUED,
3615                    "hlen %d  fix %d  csum before fix %x\n",
3616                    le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3617
3618                 /* HW bug: fixup the CSUM */
3619                 pbd->tcp_pseudo_csum =
3620                         bnx2x_csum_fix(skb_transport_header(skb),
3621                                        SKB_CS(skb), fix);
3622
3623                 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3624                    pbd->tcp_pseudo_csum);
3625         }
3626
3627         return hlen;
3628 }
3629
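/* Fill the E2 parsing BDs for a tunnelled GSO packet: records the outer and
 * inner header lengths, the outer IPv4 checksum (without length/frag fields)
 * or the IPv6 flag, the inner pseudo checksum and TCP sequence/flags, and
 * marks UDP-based tunnels so the FW can rebuild the headers per segment.
 */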
3630 static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3631                                       struct eth_tx_parse_bd_e2 *pbd_e2,
3632                                       struct eth_tx_parse_2nd_bd *pbd2,
3633                                       u16 *global_data,
3634                                       u32 xmit_type)
3635 {
3636         u16 hlen_w = 0;
3637         u8 outerip_off, outerip_len = 0;
3638
3639         /* from outer IP to transport */
3640         hlen_w = (skb_inner_transport_header(skb) -
3641                   skb_network_header(skb)) >> 1;
3642
3643         /* transport len */
3644         hlen_w += inner_tcp_hdrlen(skb) >> 1;
3645
3646         pbd2->fw_ip_hdr_to_payload_w = hlen_w;
3647
3648         /* outer IP header info */
3649         if (xmit_type & XMIT_CSUM_V4) {
3650                 struct iphdr *iph = ip_hdr(skb);
3651                 u32 csum = (__force u32)(~iph->check) -
3652                            (__force u32)iph->tot_len -
3653                            (__force u32)iph->frag_off;
3654
3655                 outerip_len = iph->ihl << 1;
3656
3657                 pbd2->fw_ip_csum_wo_len_flags_frag =
3658                         bswab16(csum_fold((__force __wsum)csum));
3659         } else {
3660                 pbd2->fw_ip_hdr_to_payload_w =
3661                         hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
3662                 pbd_e2->data.tunnel_data.flags |=
3663                         ETH_TUNNEL_DATA_IPV6_OUTER;
3664         }
3665
3666         pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3667
3668         pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3669
3670         /* inner IP header info */
3671         if (xmit_type & XMIT_CSUM_ENC_V4) {
3672                 pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
3673
3674                 pbd_e2->data.tunnel_data.pseudo_csum =
3675                         bswab16(~csum_tcpudp_magic(
3676                                         inner_ip_hdr(skb)->saddr,
3677                                         inner_ip_hdr(skb)->daddr,
3678                                         0, IPPROTO_TCP, 0));
3679         } else {
3680                 pbd_e2->data.tunnel_data.pseudo_csum =
3681                         bswab16(~csum_ipv6_magic(
3682                                         &inner_ipv6_hdr(skb)->saddr,
3683                                         &inner_ipv6_hdr(skb)->daddr,
3684                                         0, IPPROTO_TCP, 0));
3685         }
3686
3687         outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3688
3689         *global_data |=
3690                 outerip_off |
3691                 (outerip_len <<
3692                         ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3693                 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3694                         ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
3695
3696         if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3697                 SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3698                 pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3699         }
3700 }
3701
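/* If the (inner, for encapsulated packets) IPv6 header chains another IPv6
 * header, tell the FW via the E2 parsing data that extension headers are
 * present.
 */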
3702 static inline void bnx2x_set_ipv6_ext_e2(struct sk_buff *skb, u32 *parsing_data,
3703                                          u32 xmit_type)
3704 {
3705         struct ipv6hdr *ipv6;
3706
3707         if (!(xmit_type & (XMIT_GSO_ENC_V6 | XMIT_GSO_V6)))
3708                 return;
3709
3710         if (xmit_type & XMIT_GSO_ENC_V6)
3711                 ipv6 = inner_ipv6_hdr(skb);
3712         else /* XMIT_GSO_V6 */
3713                 ipv6 = ipv6_hdr(skb);
3714
3715         if (ipv6->nexthdr == NEXTHDR_IPV6)
3716                 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
3717 }
3718
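/**
 * bnx2x_start_xmit - main transmit entry point (ndo_start_xmit).
 *
 * @skb:        packet to transmit
 * @dev:        net device
 *
 * Builds the chain of start/parsing/data BDs for the packet (linearizing or
 * splitting the headers for TSO where the FW requires it), maps the linear
 * part and every fragment for DMA, and finally rings the doorbell with the
 * number of BDs consumed.
 */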
3719 /* called with netif_tx_lock
3720  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3721  * netif_wake_queue()
3722  */
3723 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3724 {
3725         struct bnx2x *bp = netdev_priv(dev);
3726
3727         struct netdev_queue *txq;
3728         struct bnx2x_fp_txdata *txdata;
3729         struct sw_tx_bd *tx_buf;
3730         struct eth_tx_start_bd *tx_start_bd, *first_bd;
3731         struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
3732         struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
3733         struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
3734         struct eth_tx_parse_2nd_bd *pbd2 = NULL;
3735         u32 pbd_e2_parsing_data = 0;
3736         u16 pkt_prod, bd_prod;
3737         int nbd, txq_index;
3738         dma_addr_t mapping;
3739         u32 xmit_type = bnx2x_xmit_type(bp, skb);
3740         int i;
3741         u8 hlen = 0;
3742         __le16 pkt_size = 0;
3743         struct ethhdr *eth;
3744         u8 mac_type = UNICAST_ADDRESS;
3745
3746 #ifdef BNX2X_STOP_ON_ERROR
3747         if (unlikely(bp->panic))
3748                 return NETDEV_TX_BUSY;
3749 #endif
3750
3751         txq_index = skb_get_queue_mapping(skb);
3752         txq = netdev_get_tx_queue(dev, txq_index);
3753
3754         BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
3755
3756         txdata = &bp->bnx2x_txq[txq_index];
3757
3758         /* enable this debug print to view the transmission queue being used
3759         DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
3760            txq_index, fp_index, txdata_index); */
3761
3762         /* enable this debug print to view the transmission details
3763         DP(NETIF_MSG_TX_QUEUED,
3764            "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
3765            txdata->cid, fp_index, txdata_index, txdata, fp); */
3766
3767         if (unlikely(bnx2x_tx_avail(bp, txdata) <
3768                         skb_shinfo(skb)->nr_frags +
3769                         BDS_PER_TX_PKT +
3770                         NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
3771                 /* Handle special storage cases separately */
3772                 if (txdata->tx_ring_size == 0) {
3773                         struct bnx2x_eth_q_stats *q_stats =
3774                                 bnx2x_fp_qstats(bp, txdata->parent_fp);
3775                         q_stats->driver_filtered_tx_pkt++;
3776                         dev_kfree_skb(skb);
3777                         return NETDEV_TX_OK;
3778                 }
3779                 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3780                 netif_tx_stop_queue(txq);
3781                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
3782
3783                 return NETDEV_TX_BUSY;
3784         }
3785
3786         DP(NETIF_MSG_TX_QUEUED,
3787            "queue[%d]: SKB: summed %x  protocol %x protocol(%x,%x) gso type %x  xmit_type %x len %d\n",
3788            txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
3789            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3790            skb->len);
3791
3792         eth = (struct ethhdr *)skb->data;
3793
3794         /* set flag according to packet type (UNICAST_ADDRESS is default)*/
3795         if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3796                 if (is_broadcast_ether_addr(eth->h_dest))
3797                         mac_type = BROADCAST_ADDRESS;
3798                 else
3799                         mac_type = MULTICAST_ADDRESS;
3800         }
3801
3802 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3803         /* First, check if we need to linearize the skb (due to FW
3804            restrictions). No need to check fragmentation if page size > 8K
3805            (there will be no violation to FW restrictions) */
3806         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3807                 /* Statistics of linearization */
3808                 bp->lin_cnt++;
3809                 if (skb_linearize(skb) != 0) {
3810                         DP(NETIF_MSG_TX_QUEUED,
3811                            "SKB linearization failed - silently dropping this SKB\n");
3812                         dev_kfree_skb_any(skb);
3813                         return NETDEV_TX_OK;
3814                 }
3815         }
3816 #endif
3817         /* Map skb linear data for DMA */
3818         mapping = dma_map_single(&bp->pdev->dev, skb->data,
3819                                  skb_headlen(skb), DMA_TO_DEVICE);
3820         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3821                 DP(NETIF_MSG_TX_QUEUED,
3822                    "SKB mapping failed - silently dropping this SKB\n");
3823                 dev_kfree_skb_any(skb);
3824                 return NETDEV_TX_OK;
3825         }
3826         /*
3827          * Please read carefully. First we use one BD which we mark as start,
3828          * then we have a parsing info BD (used for TSO or xsum),
3829          * and only then we have the rest of the TSO BDs.
3830          * (don't forget to mark the last one as last,
3831          * and to unmap only AFTER you write to the BD ...)
3832          * And above all, all pbd sizes are in words - NOT DWORDS!
3833          */
3834
3835         /* get current pkt produced now - advance it just before sending packet
3836          * since mapping of pages may fail and cause packet to be dropped
3837          */
3838         pkt_prod = txdata->tx_pkt_prod;
3839         bd_prod = TX_BD(txdata->tx_bd_prod);
3840
3841         /* get a tx_buf and first BD
3842          * tx_start_bd may be changed during SPLIT,
3843          * but first_bd will always stay first
3844          */
3845         tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3846         tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
3847         first_bd = tx_start_bd;
3848
3849         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
3850
3851         if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
3852                 if (!(bp->flags & TX_TIMESTAMPING_EN)) {
3853                         BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n");
3854                 } else if (bp->ptp_tx_skb) {
3855                         BNX2X_ERR("The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n");
3856                 } else {
3857                         skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3858                         /* schedule check for Tx timestamp */
3859                         bp->ptp_tx_skb = skb_get(skb);
3860                         bp->ptp_tx_start = jiffies;
3861                         schedule_work(&bp->ptp_task);
3862                 }
3863         }
3864
3865         /* header nbd: indirectly zero other flags! */
3866         tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
3867
3868         /* remember the first BD of the packet */
3869         tx_buf->first_bd = txdata->tx_bd_prod;
3870         tx_buf->skb = skb;
3871         tx_buf->flags = 0;
3872
3873         DP(NETIF_MSG_TX_QUEUED,
3874            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
3875            pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
3876
3877         if (skb_vlan_tag_present(skb)) {
3878                 tx_start_bd->vlan_or_ethertype =
3879                     cpu_to_le16(skb_vlan_tag_get(skb));
3880                 tx_start_bd->bd_flags.as_bitfield |=
3881                     (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3882         } else {
3883                 /* when transmitting in a VF, the start BD must hold the
3884                  * ethertype for the FW to enforce it
3885                  */
3886 #ifndef BNX2X_STOP_ON_ERROR
3887                 if (IS_VF(bp))
3888 #endif
3889                         tx_start_bd->vlan_or_ethertype =
3890                                 cpu_to_le16(ntohs(eth->h_proto));
3891 #ifndef BNX2X_STOP_ON_ERROR
3892                 else
3893                         /* used by FW for packet accounting */
3894                         tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3895 #endif
3896         }
3897
3898         nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3899
3900         /* turn on parsing and get a BD */
3901         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3902
3903         if (xmit_type & XMIT_CSUM)
3904                 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
3905
3906         if (!CHIP_IS_E1x(bp)) {
3907                 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
3908                 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3909
3910                 if (xmit_type & XMIT_CSUM_ENC) {
3911                         u16 global_data = 0;
3912
3913                         /* Set PBD in enc checksum offload case */
3914                         hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3915                                                       &pbd_e2_parsing_data,
3916                                                       xmit_type);
3917
3918                         /* turn on 2nd parsing and get a BD */
3919                         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3920
3921                         pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3922
3923                         memset(pbd2, 0, sizeof(*pbd2));
3924
3925                         pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3926                                 (skb_inner_network_header(skb) -
3927                                  skb->data) >> 1;
3928
3929                         if (xmit_type & XMIT_GSO_ENC)
3930                                 bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3931                                                           &global_data,
3932                                                           xmit_type);
3933
3934                         pbd2->global_data = cpu_to_le16(global_data);
3935
3936                         /* add additional parse BD indication to start BD */
3937                         SET_FLAG(tx_start_bd->general_data,
3938                                  ETH_TX_START_BD_PARSE_NBDS, 1);
3939                         /* set encapsulation flag in start BD */
3940                         SET_FLAG(tx_start_bd->general_data,
3941                                  ETH_TX_START_BD_TUNNEL_EXIST, 1);
3942
3943                         tx_buf->flags |= BNX2X_HAS_SECOND_PBD;
3944
3945                         nbd++;
3946                 } else if (xmit_type & XMIT_CSUM) {
3947                         /* Set PBD in checksum offload case w/o encapsulation */
3948                         hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3949                                                      &pbd_e2_parsing_data,
3950                                                      xmit_type);
3951                 }
3952
3953                 bnx2x_set_ipv6_ext_e2(skb, &pbd_e2_parsing_data, xmit_type);
3954                 /* Add the MACs to the parsing BD if this is a VF or if
3955                  * Tx Switching is enabled.
3956                  */
3957                 if (IS_VF(bp)) {
3958                         /* override GRE parameters in BD */
3959                         bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3960                                               &pbd_e2->data.mac_addr.src_mid,
3961                                               &pbd_e2->data.mac_addr.src_lo,
3962                                               eth->h_source);
3963
3964                         bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3965                                               &pbd_e2->data.mac_addr.dst_mid,
3966                                               &pbd_e2->data.mac_addr.dst_lo,
3967                                               eth->h_dest);
3968                 } else {
3969                         if (bp->flags & TX_SWITCHING)
3970                                 bnx2x_set_fw_mac_addr(
3971                                                 &pbd_e2->data.mac_addr.dst_hi,
3972                                                 &pbd_e2->data.mac_addr.dst_mid,
3973                                                 &pbd_e2->data.mac_addr.dst_lo,
3974                                                 eth->h_dest);
3975 #ifdef BNX2X_STOP_ON_ERROR
3976                         /* Enforce security is always set in Stop on Error -
3977                          * source mac should be present in the parsing BD
3978                          */
3979                         bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3980                                               &pbd_e2->data.mac_addr.src_mid,
3981                                               &pbd_e2->data.mac_addr.src_lo,
3982                                               eth->h_source);
3983 #endif
3984                 }
3985
3986                 SET_FLAG(pbd_e2_parsing_data,
3987                          ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
3988         } else {
3989                 u16 global_data = 0;
3990                 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
3991                 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3992                 /* Set PBD in checksum offload case */
3993                 if (xmit_type & XMIT_CSUM)
3994                         hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
3995
3996                 SET_FLAG(global_data,
3997                          ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
3998                 pbd_e1x->global_data |= cpu_to_le16(global_data);
3999         }
4000
4001         /* Setup the data pointer of the first BD of the packet */
4002         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4003         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
4004         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
4005         pkt_size = tx_start_bd->nbytes;
4006
4007         DP(NETIF_MSG_TX_QUEUED,
4008            "first bd @%p  addr (%x:%x)  nbytes %d  flags %x  vlan %x\n",
4009            tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
4010            le16_to_cpu(tx_start_bd->nbytes),
4011            tx_start_bd->bd_flags.as_bitfield,
4012            le16_to_cpu(tx_start_bd->vlan_or_ethertype));
4013
4014         if (xmit_type & XMIT_GSO) {
4015
4016                 DP(NETIF_MSG_TX_QUEUED,
4017                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
4018                    skb->len, hlen, skb_headlen(skb),
4019                    skb_shinfo(skb)->gso_size);
4020
4021                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
4022
4023                 if (unlikely(skb_headlen(skb) > hlen)) {
4024                         nbd++;
4025                         bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
4026                                                  &tx_start_bd, hlen,
4027                                                  bd_prod);
4028                 }
4029                 if (!CHIP_IS_E1x(bp))
4030                         pbd_e2_parsing_data |=
4031                                 (skb_shinfo(skb)->gso_size <<
4032                                  ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
4033                                  ETH_TX_PARSE_BD_E2_LSO_MSS;
4034                 else
4035                         bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
4036         }
4037
4038         /* Set the PBD's parsing_data field if not zero
4039          * (for the chips newer than 57711).
4040          */
4041         if (pbd_e2_parsing_data)
4042                 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
4043
4044         tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
4045
4046         /* Handle fragmented skb */
4047         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4048                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4049
4050                 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
4051                                            skb_frag_size(frag), DMA_TO_DEVICE);
4052                 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
4053                         unsigned int pkts_compl = 0, bytes_compl = 0;
4054
4055                         DP(NETIF_MSG_TX_QUEUED,
4056                            "Unable to map page - dropping packet...\n");
4057
4058                         /* we need to unmap all buffers already mapped
4059                          * for this SKB;
4060                          * first_bd->nbd needs to be properly updated
4061                          * before the call to bnx2x_free_tx_pkt
4062                          */
4063                         first_bd->nbd = cpu_to_le16(nbd);
4064                         bnx2x_free_tx_pkt(bp, txdata,
4065                                           TX_BD(txdata->tx_pkt_prod),
4066                                           &pkts_compl, &bytes_compl);
4067                         return NETDEV_TX_OK;
4068                 }
4069
4070                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4071                 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4072                 if (total_pkt_bd == NULL)
4073                         total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4074
4075                 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4076                 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
4077                 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
4078                 le16_add_cpu(&pkt_size, skb_frag_size(frag));
4079                 nbd++;
4080
4081                 DP(NETIF_MSG_TX_QUEUED,
4082                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
4083                    i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
4084                    le16_to_cpu(tx_data_bd->nbytes));
4085         }
4086
4087         DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
4088
4089         /* update with actual num BDs */
4090         first_bd->nbd = cpu_to_le16(nbd);
4091
4092         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4093
4094         /* now send a tx doorbell, counting the next BD
4095          * if the packet contains or ends with it
4096          */
4097         if (TX_BD_POFF(bd_prod) < nbd)
4098                 nbd++;
4099
4100         /* total_pkt_bytes should be set on the first data BD if
4101          * it's not an LSO packet and there is more than one
4102          * data BD. In this case pkt_size is limited by an MTU value.
4103          * However we prefer to set it for an LSO packet (while we don't
4104          * have to) in order to save some CPU cycles in the non-LSO
4105          * case, where we care much more about them.
4106          */
4107         if (total_pkt_bd != NULL)
4108                 total_pkt_bd->total_pkt_bytes = pkt_size;
4109
4110         if (pbd_e1x)
4111                 DP(NETIF_MSG_TX_QUEUED,
4112                    "PBD (E1X) @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
4113                    pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
4114                    pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
4115                    pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
4116                     le16_to_cpu(pbd_e1x->total_hlen_w));
4117         if (pbd_e2)
4118                 DP(NETIF_MSG_TX_QUEUED,
4119                    "PBD (E2) @%p  dst %x %x %x src %x %x %x parsing_data %x\n",
4120                    pbd_e2,
4121                    pbd_e2->data.mac_addr.dst_hi,
4122                    pbd_e2->data.mac_addr.dst_mid,
4123                    pbd_e2->data.mac_addr.dst_lo,
4124                    pbd_e2->data.mac_addr.src_hi,
4125                    pbd_e2->data.mac_addr.src_mid,
4126                    pbd_e2->data.mac_addr.src_lo,
4127                    pbd_e2->parsing_data);
4128         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
4129
4130         netdev_tx_sent_queue(txq, skb->len);
4131
4132         skb_tx_timestamp(skb);
4133
4134         txdata->tx_pkt_prod++;
4135         /*
4136          * Make sure that the BD data is updated before updating the producer
4137          * since FW might read the BD right after the producer is updated.
4138          * This is only applicable for weak-ordered memory model archs such
4139          * as IA-64. The following barrier is also mandatory since the FW
4140          * assumes packets must have BDs.
4141          */
4142         wmb();
4143
4144         txdata->tx_db.data.prod += nbd;
4145         barrier();
4146
4147         DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
4148
4149         mmiowb();
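        /* To recap the ordering above: wmb() makes the BD writes visible
         * before the doorbell producer update, barrier() keeps the compiler
         * from reordering around the doorbell copy, and mmiowb() orders the
         * doorbell MMIO write with respect to a later spinlock release on
         * weakly ordered platforms.
         */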
4150
4151         txdata->tx_bd_prod += nbd;
4152
4153         if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
4154                 netif_tx_stop_queue(txq);
4155
4156                 /* paired memory barrier is in bnx2x_tx_int(); we have to keep
4157                  * the ordering of set_bit() in netif_tx_stop_queue() and the
4158                  * read of txdata->tx_bd_cons */
4159                 smp_mb();
4160
4161                 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
4162                 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
4163                         netif_tx_wake_queue(txq);
4164         }
4165         txdata->tx_pkt++;
4166
4167         return NETDEV_TX_OK;
4168 }
4169
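/* Each of the c2s_pcp_map_lower/upper shmem words packs four PCP-to-CoS
 * mapping bytes (the byte swap below fixes up the shmem endianness), so the
 * two words together cover all eight PCP priorities; c2s_pcp_map_default
 * supplies the per-function default CoS.
 */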
4170 void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default)
4171 {
4172         int mfw_vn = BP_FW_MB_IDX(bp);
4173         u32 tmp;
4174
4175         /* If the shmem shouldn't affect the configuration, use identity mapping */
4176         if (!IS_MF_BD(bp)) {
4177                 int i;
4178
4179                 for (i = 0; i < BNX2X_MAX_PRIORITY; i++)
4180                         c2s_map[i] = i;
4181                 *c2s_default = 0;
4182
4183                 return;
4184         }
4185
4186         tmp = SHMEM2_RD(bp, c2s_pcp_map_lower[mfw_vn]);
4187         tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4188         c2s_map[0] = tmp & 0xff;
4189         c2s_map[1] = (tmp >> 8) & 0xff;
4190         c2s_map[2] = (tmp >> 16) & 0xff;
4191         c2s_map[3] = (tmp >> 24) & 0xff;
4192
4193         tmp = SHMEM2_RD(bp, c2s_pcp_map_upper[mfw_vn]);
4194         tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4195         c2s_map[4] = tmp & 0xff;
4196         c2s_map[5] = (tmp >> 8) & 0xff;
4197         c2s_map[6] = (tmp >> 16) & 0xff;
4198         c2s_map[7] = (tmp >> 24) & 0xff;
4199
4200         tmp = SHMEM2_RD(bp, c2s_pcp_map_default[mfw_vn]);
4201         tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4202         *c2s_default = (tmp >> (8 * mfw_vn)) & 0xff;
4203 }
4204
4205 /**
4206  * bnx2x_setup_tc - routine to configure net_device for multi tc
4207  *
4208  * @dev: net device to configure
4209  * @num_tc: number of traffic classes to enable
4210  *
4211  * callback connected to the ndo_setup_tc function pointer
4212  */
4213 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
4214 {
4215         struct bnx2x *bp = netdev_priv(dev);
4216         u8 c2s_map[BNX2X_MAX_PRIORITY], c2s_def;
4217         int cos, prio, count, offset;
4218
4219         /* setup tc must be called under rtnl lock */
4220         ASSERT_RTNL();
4221
4222         /* no traffic classes requested - reset the tc configuration */
4223         if (!num_tc) {
4224                 netdev_reset_tc(dev);
4225                 return 0;
4226         }
4227
4228         /* requested to support too many traffic classes */
4229         if (num_tc > bp->max_cos) {
4230                 BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
4231                           num_tc, bp->max_cos);
4232                 return -EINVAL;
4233         }
4234
4235         /* declare amount of supported traffic classes */
4236         if (netdev_set_num_tc(dev, num_tc)) {
4237                 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
4238                 return -EINVAL;
4239         }
4240
4241         bnx2x_get_c2s_mapping(bp, c2s_map, &c2s_def);
4242
4243         /* configure priority to traffic class mapping */
4244         for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
4245                 int outer_prio = c2s_map[prio];
4246
4247                 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[outer_prio]);
4248                 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4249                    "mapping priority %d to tc %d\n",
4250                    outer_prio, bp->prio_to_cos[outer_prio]);
4251         }
4252
4253         /* Use this configuration to differentiate tc0 from other COSes.
4254            This can be used for ETS or PFC, and saves the effort of setting
4255            up a multi-class queueing discipline or negotiating DCBX with a switch:
4256         netdev_set_prio_tc_map(dev, 0, 0);
4257         DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
4258         for (prio = 1; prio < 16; prio++) {
4259                 netdev_set_prio_tc_map(dev, prio, 1);
4260                 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
4261         } */
4262
4263         /* configure traffic class to transmission queue mapping */
4264         for (cos = 0; cos < bp->max_cos; cos++) {
4265                 count = BNX2X_NUM_ETH_QUEUES(bp);
4266                 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
4267                 netdev_set_tc_queue(dev, cos, count, offset);
4268                 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4269                    "mapping tc %d to offset %d count %d\n",
4270                    cos, offset, count);
4271         }
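        /* For example (illustrative numbers): with 8 ETH queues, max_cos = 3
         * and the non-CNIC queue count equal to the ETH queue count, tc0 maps
         * to txqs 0-7, tc1 to txqs 8-15 and tc2 to txqs 16-23.
         */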
4272
4273         return 0;
4274 }
4275
4276 int __bnx2x_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
4277                      struct tc_to_netdev *tc)
4278 {
4279         if (tc->type != TC_SETUP_MQPRIO)
4280                 return -EINVAL;
4281
4282         tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
4283
4284         return bnx2x_setup_tc(dev, tc->mqprio->num_tc);
4285 }
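/* From userspace this path is reached through the mqprio qdisc, e.g.
 * something along the lines of (illustrative interface name and mapping):
 *
 *   tc qdisc add dev eth0 root mqprio num_tc 3 \
 *      map 0 0 0 1 1 1 2 2 2 2 2 2 2 2 2 2 hw 1
 */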
4286
4287 /* called with rtnl_lock */
4288 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4289 {
4290         struct sockaddr *addr = p;
4291         struct bnx2x *bp = netdev_priv(dev);
4292         int rc = 0;
4293
4294         if (!is_valid_ether_addr(addr->sa_data)) {
4295                 BNX2X_ERR("Requested MAC address is not valid\n");
4296                 return -EINVAL;
4297         }
4298
4299         if (IS_MF_STORAGE_ONLY(bp)) {
4300                 BNX2X_ERR("Can't change address on STORAGE ONLY function\n");
4301                 return -EINVAL;
4302         }
4303
4304         if (netif_running(dev))  {
4305                 rc = bnx2x_set_eth_mac(bp, false);
4306                 if (rc)
4307                         return rc;
4308         }
4309
4310         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4311
4312         if (netif_running(dev))
4313                 rc = bnx2x_set_eth_mac(bp, true);
4314
4315         if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
4316                 SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
4317
4318         return rc;
4319 }
4320
4321 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4322 {
4323         union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4324         struct bnx2x_fastpath *fp = &bp->fp[fp_index];
4325         u8 cos;
4326
4327         /* Common */
4328
4329         if (IS_FCOE_IDX(fp_index)) {
4330                 memset(sb, 0, sizeof(union host_hc_status_block));
4331                 fp->status_blk_mapping = 0;
4332         } else {
4333                 /* status blocks */
4334                 if (!CHIP_IS_E1x(bp))
4335                         BNX2X_PCI_FREE(sb->e2_sb,
4336                                        bnx2x_fp(bp, fp_index,
4337                                                 status_blk_mapping),
4338                                        sizeof(struct host_hc_status_block_e2));
4339                 else
4340                         BNX2X_PCI_FREE(sb->e1x_sb,
4341                                        bnx2x_fp(bp, fp_index,
4342                                                 status_blk_mapping),
4343                                        sizeof(struct host_hc_status_block_e1x));
4344         }
4345
4346         /* Rx */
4347         if (!skip_rx_queue(bp, fp_index)) {
4348                 bnx2x_free_rx_bds(fp);
4349
4350                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4351                 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4352                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4353                                bnx2x_fp(bp, fp_index, rx_desc_mapping),
4354                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
4355
4356                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4357                                bnx2x_fp(bp, fp_index, rx_comp_mapping),
4358                                sizeof(struct eth_fast_path_rx_cqe) *
4359                                NUM_RCQ_BD);
4360
4361                 /* SGE ring */
4362                 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4363                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4364                                bnx2x_fp(bp, fp_index, rx_sge_mapping),
4365                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4366         }
4367
4368         /* Tx */
4369         if (!skip_tx_queue(bp, fp_index)) {
4370                 /* fastpath tx rings: tx_buf tx_desc */
4371                 for_each_cos_in_tx_queue(fp, cos) {
4372                         struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4373
4374                         DP(NETIF_MSG_IFDOWN,
4375                            "freeing tx memory of fp %d cos %d cid %d\n",
4376                            fp_index, cos, txdata->cid);
4377
4378                         BNX2X_FREE(txdata->tx_buf_ring);
4379                         BNX2X_PCI_FREE(txdata->tx_desc_ring,
4380                                 txdata->tx_desc_mapping,
4381                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4382                 }
4383         }
4384         /* end of fastpath */
4385 }
4386
4387 static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
4388 {
4389         int i;
4390         for_each_cnic_queue(bp, i)
4391                 bnx2x_free_fp_mem_at(bp, i);
4392 }
4393
4394 void bnx2x_free_fp_mem(struct bnx2x *bp)
4395 {
4396         int i;
4397         for_each_eth_queue(bp, i)
4398                 bnx2x_free_fp_mem_at(bp, i);
4399 }
4400
4401 static void set_sb_shortcuts(struct bnx2x *bp, int index)
4402 {
4403         union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
4404         if (!CHIP_IS_E1x(bp)) {
4405                 bnx2x_fp(bp, index, sb_index_values) =
4406                         (__le16 *)status_blk.e2_sb->sb.index_values;
4407                 bnx2x_fp(bp, index, sb_running_index) =
4408                         (__le16 *)status_blk.e2_sb->sb.running_index;
4409         } else {
4410                 bnx2x_fp(bp, index, sb_index_values) =
4411                         (__le16 *)status_blk.e1x_sb->sb.index_values;
4412                 bnx2x_fp(bp, index, sb_running_index) =
4413                         (__le16 *)status_blk.e1x_sb->sb.running_index;
4414         }
4415 }
4416
4417 /* Returns the number of actually allocated BDs */
4418 static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4419                               int rx_ring_size)
4420 {
4421         struct bnx2x *bp = fp->bp;
4422         u16 ring_prod, cqe_ring_prod;
4423         int i, failure_cnt = 0;
4424
4425         fp->rx_comp_cons = 0;
4426         cqe_ring_prod = ring_prod = 0;
4427
4428         /* This routine is called only during init, so
4429          * fp->eth_q_stats.rx_skb_alloc_failed = 0
4430          */
4431         for (i = 0; i < rx_ring_size; i++) {
4432                 if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
4433                         failure_cnt++;
4434                         continue;
4435                 }
4436                 ring_prod = NEXT_RX_IDX(ring_prod);
4437                 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4438                 WARN_ON(ring_prod <= (i - failure_cnt));
4439         }
4440
4441         if (failure_cnt)
4442                 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4443                           i - failure_cnt, fp->index);
4444
4445         fp->rx_bd_prod = ring_prod;
4446         /* Limit the CQE producer by the CQE ring size */
4447         fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4448                                cqe_ring_prod);
4449
4450         bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
4451
4452         return i - failure_cnt;
4453 }
4454
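/* The RCQ is chained page by page: the last descriptor of each page is
 * rewritten below as a "next page" CQE pointing at the following page
 * (wrapping back to the first page via the modulo), which lets the ring be
 * walked across page boundaries.
 */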
4455 static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4456 {
4457         int i;
4458
4459         for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4460                 struct eth_rx_cqe_next_page *nextpg;
4461
4462                 nextpg = (struct eth_rx_cqe_next_page *)
4463                         &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4464                 nextpg->addr_hi =
4465                         cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4466                                    BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4467                 nextpg->addr_lo =
4468                         cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4469                                    BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4470         }
4471 }
4472
4473 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4474 {
4475         union host_hc_status_block *sb;
4476         struct bnx2x_fastpath *fp = &bp->fp[index];
4477         int ring_size = 0;
4478         u8 cos;
4479         int rx_ring_size = 0;
4480
4481         if (!bp->rx_ring_size && IS_MF_STORAGE_ONLY(bp)) {
4482                 rx_ring_size = MIN_RX_SIZE_NONTPA;
4483                 bp->rx_ring_size = rx_ring_size;
4484         } else if (!bp->rx_ring_size) {
4485                 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4486
4487                 if (CHIP_IS_E3(bp)) {
4488                         u32 cfg = SHMEM_RD(bp,
4489                                            dev_info.port_hw_config[BP_PORT(bp)].
4490                                            default_cfg);
4491
4492                         /* Decrease ring size for 1G functions */
4493                         if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4494                             PORT_HW_CFG_NET_SERDES_IF_SGMII)
4495                                 rx_ring_size /= 10;
4496                 }
4497
4498                 /* allocate at least the number of buffers required by the FW */
4499                 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4500                                      MIN_RX_SIZE_TPA, rx_ring_size);
4501
4502                 bp->rx_ring_size = rx_ring_size;
4503         } else /* if rx_ring_size specified - use it */
4504                 rx_ring_size = bp->rx_ring_size;
4505
4506         DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
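        /* Illustrative example: if MAX_RX_AVAIL were 4096 and four RX queues
         * were in use, each queue would start from 1024 BDs, be divided by 10
         * on an E3 SGMII (1G) port, and then be raised back to the FW minimum
         * (MIN_RX_SIZE_TPA or MIN_RX_SIZE_NONTPA) if the result fell below it.
         */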
4507
4508         /* Common */
4509         sb = &bnx2x_fp(bp, index, status_blk);
4510
4511         if (!IS_FCOE_IDX(index)) {
4512                 /* status blocks */
4513                 if (!CHIP_IS_E1x(bp)) {
4514                         sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4515                                                     sizeof(struct host_hc_status_block_e2));
4516                         if (!sb->e2_sb)
4517                                 goto alloc_mem_err;
4518                 } else {
4519                         sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4520                                                      sizeof(struct host_hc_status_block_e1x));
4521                         if (!sb->e1x_sb)
4522                                 goto alloc_mem_err;
4523                 }
4524         }
4525
4526         /* The FCoE queue uses the default SB and doesn't ACK the SB, so there is
4527          * no need to set shortcuts for it.
4528          */
4529         if (!IS_FCOE_IDX(index))
4530                 set_sb_shortcuts(bp, index);
4531
4532         /* Tx */
4533         if (!skip_tx_queue(bp, index)) {
4534                 /* fastpath tx rings: tx_buf tx_desc */
4535                 for_each_cos_in_tx_queue(fp, cos) {
4536                         struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4537
4538                         DP(NETIF_MSG_IFUP,
4539                            "allocating tx memory of fp %d cos %d\n",
4540                            index, cos);
4541
4542                         txdata->tx_buf_ring = kcalloc(NUM_TX_BD,
4543                                                       sizeof(struct sw_tx_bd),
4544                                                       GFP_KERNEL);
4545                         if (!txdata->tx_buf_ring)
4546                                 goto alloc_mem_err;
4547                         txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
4548                                                                sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4549                         if (!txdata->tx_desc_ring)
4550                                 goto alloc_mem_err;
4551                 }
4552         }
4553
4554         /* Rx */
4555         if (!skip_rx_queue(bp, index)) {
4556                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4557                 bnx2x_fp(bp, index, rx_buf_ring) =
4558                         kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL);
4559                 if (!bnx2x_fp(bp, index, rx_buf_ring))
4560                         goto alloc_mem_err;
4561                 bnx2x_fp(bp, index, rx_desc_ring) =
4562                         BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
4563                                         sizeof(struct eth_rx_bd) * NUM_RX_BD);
4564                 if (!bnx2x_fp(bp, index, rx_desc_ring))
4565                         goto alloc_mem_err;
4566
4567                 /* Seed all CQEs with 1s */
4568                 bnx2x_fp(bp, index, rx_comp_ring) =
4569                         BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
4570                                          sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);
4571                 if (!bnx2x_fp(bp, index, rx_comp_ring))
4572                         goto alloc_mem_err;
4573
4574                 /* SGE ring */
4575                 bnx2x_fp(bp, index, rx_page_ring) =
4576                         kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page),
4577                                 GFP_KERNEL);
4578                 if (!bnx2x_fp(bp, index, rx_page_ring))
4579                         goto alloc_mem_err;
4580                 bnx2x_fp(bp, index, rx_sge_ring) =
4581                         BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
4582                                         BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4583                 if (!bnx2x_fp(bp, index, rx_sge_ring))
4584                         goto alloc_mem_err;
4585                 /* RX BD ring */
4586                 bnx2x_set_next_page_rx_bd(fp);
4587
4588                 /* CQ ring */
4589                 bnx2x_set_next_page_rx_cq(fp);
4590
4591                 /* BDs */
4592                 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4593                 if (ring_size < rx_ring_size)
4594                         goto alloc_mem_err;
4595         }
4596
4597         return 0;
4598
4599 /* handles low memory cases */
4600 alloc_mem_err:
4601         BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4602                                                 index, ring_size);
4603         /* FW will drop all packets if the queue is not big enough;
4604          * in that case we disable the queue.
4605          * The minimum size differs for OOO, TPA and non-TPA queues.
4606          */
4607         if (ring_size < (fp->mode == TPA_MODE_DISABLED ?
4608                                 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
4609                         /* release memory allocated for this queue */
4610                         bnx2x_free_fp_mem_at(bp, index);
4611                         return -ENOMEM;
4612         }
4613         return 0;
4614 }
4615
4616 static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
4617 {
4618         if (!NO_FCOE(bp))
4619                 /* FCoE */
4620                 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
4621                         /* we will fail the load process instead of marking
4622                          * NO_FCOE_FLAG
4623                          */
4624                         return -ENOMEM;
4625
4626         return 0;
4627 }
4628
4629 static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
4630 {
4631         int i;
4632
4633         /* 1. Allocate FP for leading - fatal if error
4634          * 2. Allocate RSS - fix number of queues if error
4635          */
4636
4637         /* leading */
4638         if (bnx2x_alloc_fp_mem_at(bp, 0))
4639                 return -ENOMEM;
4640
4641         /* RSS */
4642         for_each_nondefault_eth_queue(bp, i)
4643                 if (bnx2x_alloc_fp_mem_at(bp, i))
4644                         break;
4645
4646         /* handle memory failures */
4647         if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4648                 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4649
4650                 WARN_ON(delta < 0);
4651                 bnx2x_shrink_eth_fp(bp, delta);
4652                 if (CNIC_SUPPORT(bp))
4653                         /* move the non-eth FPs next to the last eth FP;
4654                          * must be done in this order:
4655                          * FCOE_IDX < FWD_IDX < OOO_IDX
4656                          */
4657
4658                         /* move the FCoE fp even if NO_FCOE_FLAG is on */
4659                         bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4660                 bp->num_ethernet_queues -= delta;
4661                 bp->num_queues = bp->num_ethernet_queues +
4662                                  bp->num_cnic_queues;
4663                 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4664                           bp->num_queues + delta, bp->num_queues);
4665         }
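        /* For example: if 8 ETH queues were requested and allocation failed
         * at i = 5, delta is 3; the ETH fastpaths shrink to 5 and the FCoE
         * fastpath index is moved down by delta so it stays adjacent to the
         * last ETH queue.
         */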
4666
4667         return 0;
4668 }
4669
4670 void bnx2x_free_mem_bp(struct bnx2x *bp)
4671 {
4672         int i;
4673
4674         for (i = 0; i < bp->fp_array_size; i++)
4675                 kfree(bp->fp[i].tpa_info);
4676         kfree(bp->fp);
4677         kfree(bp->sp_objs);
4678         kfree(bp->fp_stats);
4679         kfree(bp->bnx2x_txq);
4680         kfree(bp->msix_table);
4681         kfree(bp->ilt);
4682 }
4683
4684 int bnx2x_alloc_mem_bp(struct bnx2x *bp)
4685 {
4686         struct bnx2x_fastpath *fp;
4687         struct msix_entry *tbl;
4688         struct bnx2x_ilt *ilt;
4689         int msix_table_size = 0;
4690         int fp_array_size, txq_array_size;
4691         int i;
4692
4693         /*
4694          * The biggest MSI-X table we might need is the maximum number of fast
4695          * path IGU SBs plus the default SB (PF only).
4696          */
4697         msix_table_size = bp->igu_sb_cnt;
4698         if (IS_PF(bp))
4699                 msix_table_size++;
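        /* e.g. a PF with igu_sb_cnt = 16 ends up with 17 MSI-X entries
         * (16 fastpath SBs plus the default SB), while a VF skips the
         * extra default-SB entry.
         */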
4700         BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
4701
4702         /* fp array: RSS plus CNIC related L2 queues */
4703         fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
4704         bp->fp_array_size = fp_array_size;
4705         BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
4706
4707         fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
4708         if (!fp)
4709                 goto alloc_err;
4710         for (i = 0; i < bp->fp_array_size; i++) {
4711                 fp[i].tpa_info =
4712                         kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
4713                                 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4714                 if (!(fp[i].tpa_info))
4715                         goto alloc_err;
4716         }
4717
4718         bp->fp = fp;
4719
4720         /* allocate sp objs */
4721         bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
4722                               GFP_KERNEL);
4723         if (!bp->sp_objs)
4724                 goto alloc_err;
4725
4726         /* allocate fp_stats */
4727         bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
4728                                GFP_KERNEL);
4729         if (!bp->fp_stats)
4730                 goto alloc_err;
4731
4732         /* Allocate memory for the transmission queues array */
4733         txq_array_size =
4734                 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
4735         BNX2X_DEV_INFO("txq_array_size %d\n", txq_array_size);
4736
4737         bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4738                                 GFP_KERNEL);
4739         if (!bp->bnx2x_txq)
4740                 goto alloc_err;
4741
4742         /* msix table */
4743         tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
4744         if (!tbl)
4745                 goto alloc_err;
4746         bp->msix_table = tbl;
4747
4748         /* ilt */
4749         ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4750         if (!ilt)
4751                 goto alloc_err;
4752         bp->ilt = ilt;
4753
4754         return 0;
4755 alloc_err:
4756         bnx2x_free_mem_bp(bp);
4757         return -ENOMEM;
4758 }
4759
4760 int bnx2x_reload_if_running(struct net_device *dev)
4761 {
4762         struct bnx2x *bp = netdev_priv(dev);
4763
4764         if (unlikely(!netif_running(dev)))
4765                 return 0;
4766
4767         bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
4768         return bnx2x_nic_load(bp, LOAD_NORMAL);
4769 }
4770
4771 int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4772 {
4773         u32 sel_phy_idx = 0;
4774         if (bp->link_params.num_phys <= 1)
4775                 return INT_PHY;
4776
4777         if (bp->link_vars.link_up) {
4778                 sel_phy_idx = EXT_PHY1;
4779                 /* In case link is SERDES, check if the EXT_PHY2 is the one */
4780                 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4781                     (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4782                         sel_phy_idx = EXT_PHY2;
4783         } else {
4784
4785                 switch (bnx2x_phy_selection(&bp->link_params)) {
4786                 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4787                 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4788                 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4789                        sel_phy_idx = EXT_PHY1;
4790                        break;
4791                 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4792                 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4793                        sel_phy_idx = EXT_PHY2;
4794                        break;
4795                 }
4796         }
4797
4798         return sel_phy_idx;
4799 }
4800 int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4801 {
4802         u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4803         /*
4804          * The selected (active) PHY index is always the post-swap one (when PHY
4805          * swapping is enabled), so when swapping is enabled we need to reverse
4806          * it to obtain the configuration index.
4807          */
4808
4809         if (bp->link_params.multi_phy_config &
4810             PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4811                 if (sel_phy_idx == EXT_PHY1)
4812                         sel_phy_idx = EXT_PHY2;
4813                 else if (sel_phy_idx == EXT_PHY2)
4814                         sel_phy_idx = EXT_PHY1;
4815         }
4816         return LINK_CONFIG_IDX(sel_phy_idx);
4817 }
4818
4819 #ifdef NETDEV_FCOE_WWNN
4820 int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4821 {
4822         struct bnx2x *bp = netdev_priv(dev);
4823         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4824
4825         switch (type) {
4826         case NETDEV_FCOE_WWNN:
4827                 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4828                                 cp->fcoe_wwn_node_name_lo);
4829                 break;
4830         case NETDEV_FCOE_WWPN:
4831                 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4832                                 cp->fcoe_wwn_port_name_lo);
4833                 break;
4834         default:
4835                 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
4836                 return -EINVAL;
4837         }
4838
4839         return 0;
4840 }
4841 #endif
4842
4843 /* called with rtnl_lock */
4844 int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4845 {
4846         struct bnx2x *bp = netdev_priv(dev);
4847
4848         if (pci_num_vf(bp->pdev)) {
4849                 DP(BNX2X_MSG_IOV, "VFs are enabled, can not change MTU\n");
4850                 return -EPERM;
4851         }
4852
4853         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4854                 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
4855                 return -EAGAIN;
4856         }
4857
4858         /* This does not race with packet allocation
4859          * because the actual alloc size is
4860          * only updated as part of load
4861          */
4862         dev->mtu = new_mtu;
4863
4864         if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
4865                 SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
4866
4867         return bnx2x_reload_if_running(dev);
4868 }
4869
4870 netdev_features_t bnx2x_fix_features(struct net_device *dev,
4871                                      netdev_features_t features)
4872 {
4873         struct bnx2x *bp = netdev_priv(dev);
4874
4875         if (pci_num_vf(bp->pdev)) {
4876                 netdev_features_t changed = dev->features ^ features;
4877
4878                 /* Revert the requested changes in features if they
4879                  * would require internal reload of PF in bnx2x_set_features().
4880                  */
4881                 if (!(features & NETIF_F_RXCSUM) && !bp->disable_tpa) {
4882                         features &= ~NETIF_F_RXCSUM;
4883                         features |= dev->features & NETIF_F_RXCSUM;
4884                 }
4885
4886                 if (changed & NETIF_F_LOOPBACK) {
4887                         features &= ~NETIF_F_LOOPBACK;
4888                         features |= dev->features & NETIF_F_LOOPBACK;
4889                 }
4890         }
4891
4892         /* TPA requires Rx CSUM offloading */
4893         if (!(features & NETIF_F_RXCSUM)) {
4894                 features &= ~NETIF_F_LRO;
4895                 features &= ~NETIF_F_GRO;
4896         }
4897
4898         return features;
4899 }
4900
4901 int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
4902 {
4903         struct bnx2x *bp = netdev_priv(dev);
4904         netdev_features_t changes = features ^ dev->features;
4905         bool bnx2x_reload = false;
4906         int rc;
4907
4908         /* VFs or non-SRIOV PFs should be able to change the loopback feature */
4909         if (!pci_num_vf(bp->pdev)) {
4910                 if (features & NETIF_F_LOOPBACK) {
4911                         if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4912                                 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4913                                 bnx2x_reload = true;
4914                         }
4915                 } else {
4916                         if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4917                                 bp->link_params.loopback_mode = LOOPBACK_NONE;
4918                                 bnx2x_reload = true;
4919                         }
4920                 }
4921         }
4922
4923         /* if GRO is changed while LRO is enabled, don't force a reload */
4924         if ((changes & NETIF_F_GRO) && (features & NETIF_F_LRO))
4925                 changes &= ~NETIF_F_GRO;
4926
4927         /* if GRO is changed while HW TPA is off, don't force a reload */
4928         if ((changes & NETIF_F_GRO) && bp->disable_tpa)
4929                 changes &= ~NETIF_F_GRO;
4930
4931         if (changes)
4932                 bnx2x_reload = true;
4933
4934         if (bnx2x_reload) {
4935                 if (bp->recovery_state == BNX2X_RECOVERY_DONE) {
4936                         dev->features = features;
4937                         rc = bnx2x_reload_if_running(dev);
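                        /* A positive return value below appears to be used so
                         * that the networking core does not overwrite
                         * dev->features, which the driver has already set.
                         */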
4938                         return rc ? rc : 1;
4939                 }
4940                 /* else: bnx2x_nic_load() will be called at end of recovery */
4941         }
4942
4943         return 0;
4944 }
4945
4946 void bnx2x_tx_timeout(struct net_device *dev)
4947 {
4948         struct bnx2x *bp = netdev_priv(dev);
4949
4950 #ifdef BNX2X_STOP_ON_ERROR
4951         if (!bp->panic)
4952                 bnx2x_panic();
4953 #endif
4954
4955         /* This allows the netif to be shut down gracefully before resetting */
4956         bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
4957 }
4958
4959 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
4960 {
4961         struct net_device *dev = pci_get_drvdata(pdev);
4962         struct bnx2x *bp;
4963
4964         if (!dev) {
4965                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4966                 return -ENODEV;
4967         }
4968         bp = netdev_priv(dev);
4969
4970         rtnl_lock();
4971
4972         pci_save_state(pdev);
4973
4974         if (!netif_running(dev)) {
4975                 rtnl_unlock();
4976                 return 0;
4977         }
4978
4979         netif_device_detach(dev);
4980
4981         bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
4982
4983         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
4984
4985         rtnl_unlock();
4986
4987         return 0;
4988 }
4989
4990 int bnx2x_resume(struct pci_dev *pdev)
4991 {
4992         struct net_device *dev = pci_get_drvdata(pdev);
4993         struct bnx2x *bp;
4994         int rc;
4995
4996         if (!dev) {
4997                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4998                 return -ENODEV;
4999         }
5000         bp = netdev_priv(dev);
5001
5002         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
5003                 BNX2X_ERR("Handling parity error recovery. Try again later\n");
5004                 return -EAGAIN;
5005         }
5006
5007         rtnl_lock();
5008
5009         pci_restore_state(pdev);
5010
5011         if (!netif_running(dev)) {
5012                 rtnl_unlock();
5013                 return 0;
5014         }
5015
5016         bnx2x_set_power_state(bp, PCI_D0);
5017         netif_device_attach(dev);
5018
5019         rc = bnx2x_nic_load(bp, LOAD_OPEN);
5020
5021         rtnl_unlock();
5022
5023         return rc;
5024 }
5025
5026 void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
5027                               u32 cid)
5028 {
5029         if (!cxt) {
5030                 BNX2X_ERR("bad context pointer %p\n", cxt);
5031                 return;
5032         }
5033
5034         /* ustorm cxt validation */
5035         cxt->ustorm_ag_context.cdu_usage =
5036                 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
5037                         CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
5038         /* xstorm cxt validation */
5039         cxt->xstorm_ag_context.cdu_reserved =
5040                 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
5041                         CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
5042 }
5043
5044 static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
5045                                     u8 fw_sb_id, u8 sb_index,
5046                                     u8 ticks)
5047 {
5048         u32 addr = BAR_CSTRORM_INTMEM +
5049                    CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
5050         REG_WR8(bp, addr, ticks);
5051         DP(NETIF_MSG_IFUP,
5052            "port %x fw_sb_id %d sb_index %d ticks %d\n",
5053            port, fw_sb_id, sb_index, ticks);
5054 }
5055
5056 static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
5057                                     u16 fw_sb_id, u8 sb_index,
5058                                     u8 disable)
5059 {
5060         u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
5061         u32 addr = BAR_CSTRORM_INTMEM +
5062                    CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
5063         u8 flags = REG_RD8(bp, addr);
5064         /* clear and set */
5065         flags &= ~HC_INDEX_DATA_HC_ENABLED;
5066         flags |= enable_flag;
5067         REG_WR8(bp, addr, flags);
5068         DP(NETIF_MSG_IFUP,
5069            "port %x fw_sb_id %d sb_index %d disable %d\n",
5070            port, fw_sb_id, sb_index, disable);
5071 }
5072
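/* Coalescing timeouts are programmed in BNX2X_BTR-microsecond ticks, e.g. a
 * usec value of 4 * BNX2X_BTR yields 4 ticks.  Passing usec == 0 (or an
 * explicit disable) turns host coalescing off for this SB index.
 */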
5073 void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
5074                                     u8 sb_index, u8 disable, u16 usec)
5075 {
5076         int port = BP_PORT(bp);
5077         u8 ticks = usec / BNX2X_BTR;
5078
5079         storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
5080
5081         disable = disable ? 1 : (usec ? 0 : 1);
5082         storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
5083 }
5084
5085 void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
5086                             u32 verbose)
5087 {
5088         smp_mb__before_atomic();
5089         set_bit(flag, &bp->sp_rtnl_state);
5090         smp_mb__after_atomic();
5091         DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
5092            flag);
5093         schedule_delayed_work(&bp->sp_rtnl_task, 0);
5094 }