1 /*
2  * Linux network driver for Brocade Converged Network Adapter.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms of the GNU General Public License (GPL) Version 2 as
6  * published by the Free Software Foundation
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  */
13 /*
14  * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15  * All rights reserved
16  * www.brocade.com
17  */
18 #include <linux/bitops.h>
19 #include <linux/netdevice.h>
20 #include <linux/skbuff.h>
21 #include <linux/etherdevice.h>
22 #include <linux/in.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_vlan.h>
25 #include <linux/if_ether.h>
26 #include <linux/ip.h>
27 #include <linux/prefetch.h>
28
29 #include "bnad.h"
30 #include "bna.h"
31 #include "cna.h"
32
33 static DEFINE_MUTEX(bnad_fwimg_mutex);
34
35 /*
36  * Module params
37  */
38 static uint bnad_msix_disable;
39 module_param(bnad_msix_disable, uint, 0444);
40 MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");
41
42 static uint bnad_ioc_auto_recover = 1;
43 module_param(bnad_ioc_auto_recover, uint, 0444);
44 MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");
45
46 /*
47  * Global variables
48  */
49 u32 bnad_rxqs_per_cq = 2;
50
51 static const u8 bnad_bcast_addr[] =  {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
52
53 /*
54  * Local MACROS
55  */
56 #define BNAD_TX_UNMAPQ_DEPTH (bnad->txq_depth * 2)
57
58 #define BNAD_RX_UNMAPQ_DEPTH (bnad->rxq_depth)
59
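/*
 * Mailbox IRQ: the MSI-X mailbox vector when MSI-X is enabled,
 * otherwise the device's legacy INTx line.
 */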
60 #define BNAD_GET_MBOX_IRQ(_bnad)                                \
61         (((_bnad)->cfg_flags & BNAD_CF_MSIX) ?                  \
62          ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
63          ((_bnad)->pcidev->irq))
64
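/*
 * Fills a memory resource request for _num kernel-VA blocks, each
 * sized to hold a bnad_unmap_q with _depth unmap entries.
 */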
65 #define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _depth)       \
66 do {                                                            \
67         (_res_info)->res_type = BNA_RES_T_MEM;                  \
68         (_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA;   \
69         (_res_info)->res_u.mem_info.num = (_num);               \
70         (_res_info)->res_u.mem_info.len =                       \
71         sizeof(struct bnad_unmap_q) +                           \
72         (sizeof(struct bnad_skb_unmap) * ((_depth) - 1));       \
73 } while (0)
74
75 #define BNAD_TXRX_SYNC_MDELAY   250     /* 250 msecs */
76
77 /*
78  * Reinitialize completions in CQ, once Rx is taken down
79  */
80 static void
81 bnad_cq_cmpl_init(struct bnad *bnad, struct bna_ccb *ccb)
82 {
83         struct bna_cq_entry *cmpl, *next_cmpl;
84         unsigned int wi_range, wis = 0, ccb_prod = 0;
85         int i;
86
87         BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt, cmpl,
88                             wi_range);
89
90         for (i = 0; i < ccb->q_depth; i++) {
91                 wis++;
92                 if (likely(--wi_range))
93                         next_cmpl = cmpl + 1;
94                 else {
95                         BNA_QE_INDX_ADD(ccb_prod, wis, ccb->q_depth);
96                         wis = 0;
97                         BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt,
98                                                 next_cmpl, wi_range);
99                 }
100                 cmpl->valid = 0;
101                 cmpl = next_cmpl;
102         }
103 }
104
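/*
 * Unmaps the DMA mappings of an skb's header and its 'frag' page
 * fragments, starting at 'index' in the unmap array, and returns
 * the next unmap index.
 */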
105 static u32
106 bnad_pci_unmap_skb(struct device *pdev, struct bnad_skb_unmap *array,
107         u32 index, u32 depth, struct sk_buff *skb, u32 frag)
108 {
109         int j;
110         array[index].skb = NULL;
111
112         dma_unmap_single(pdev, dma_unmap_addr(&array[index], dma_addr),
113                         skb_headlen(skb), DMA_TO_DEVICE);
114         dma_unmap_addr_set(&array[index], dma_addr, 0);
115         BNA_QE_INDX_ADD(index, 1, depth);
116
117         for (j = 0; j < frag; j++) {
118                 dma_unmap_page(pdev, dma_unmap_addr(&array[index], dma_addr),
119                           skb_frag_size(&skb_shinfo(skb)->frags[j]), DMA_TO_DEVICE);
120                 dma_unmap_addr_set(&array[index], dma_addr, 0);
121                 BNA_QE_INDX_ADD(index, 1, depth);
122         }
123
124         return index;
125 }
126
127 /*
128  * Frees all pending Tx Bufs
129  * At this point no activity is expected on the Q,
130  * so DMA unmap & freeing is fine.
131  */
132 static void
133 bnad_free_all_txbufs(struct bnad *bnad,
134                  struct bna_tcb *tcb)
135 {
136         u32             unmap_cons;
137         struct bnad_unmap_q *unmap_q = tcb->unmap_q;
138         struct bnad_skb_unmap *unmap_array;
139         struct sk_buff          *skb = NULL;
140         int                     q;
141
142         unmap_array = unmap_q->unmap_array;
143
144         for (q = 0; q < unmap_q->q_depth; q++) {
145                 skb = unmap_array[q].skb;
146                 if (!skb)
147                         continue;
148
149                 unmap_cons = q;
150                 unmap_cons = bnad_pci_unmap_skb(&bnad->pcidev->dev, unmap_array,
151                                 unmap_cons, unmap_q->q_depth, skb,
152                                 skb_shinfo(skb)->nr_frags);
153
154                 dev_kfree_skb_any(skb);
155         }
156 }
157
158 /* Data Path Handlers */
159
160 /*
161  * bnad_free_txbufs : Frees the Tx bufs on Tx completion
162  * Can be called in a) Interrupt context
163  *                  b) Sending context
164  *                  c) Tasklet context
165  */
166 static u32
167 bnad_free_txbufs(struct bnad *bnad,
168                  struct bna_tcb *tcb)
169 {
170         u32             unmap_cons, sent_packets = 0, sent_bytes = 0;
171         u16             wis, updated_hw_cons;
172         struct bnad_unmap_q *unmap_q = tcb->unmap_q;
173         struct bnad_skb_unmap *unmap_array;
174         struct sk_buff          *skb;
175
176         /*
177          * Just return if TX is stopped. This check is useful
178          * when bnad_free_txbufs() runs from a tasklet that was
179          * scheduled before bnad_cb_tx_cleanup() cleared the
180          * BNAD_TXQ_TX_STARTED bit, but actually executes after
181          * the cleanup has completed.
182          */
183         if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
184                 return 0;
185
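        /* Work items completed by hardware since the driver's consumer index was last updated */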
186         updated_hw_cons = *(tcb->hw_consumer_index);
187
188         wis = BNA_Q_INDEX_CHANGE(tcb->consumer_index,
189                                   updated_hw_cons, tcb->q_depth);
190
191         BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));
192
193         unmap_array = unmap_q->unmap_array;
194         unmap_cons = unmap_q->consumer_index;
195
196         prefetch(&unmap_array[unmap_cons + 1]);
197         while (wis) {
198                 skb = unmap_array[unmap_cons].skb;
199
200                 sent_packets++;
201                 sent_bytes += skb->len;
202                 wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);
203
204                 unmap_cons = bnad_pci_unmap_skb(&bnad->pcidev->dev, unmap_array,
205                                 unmap_cons, unmap_q->q_depth, skb,
206                                 skb_shinfo(skb)->nr_frags);
207
208                 dev_kfree_skb_any(skb);
209         }
210
211         /* Update consumer pointers. */
212         tcb->consumer_index = updated_hw_cons;
213         unmap_q->consumer_index = unmap_cons;
214
215         tcb->txq->tx_packets += sent_packets;
216         tcb->txq->tx_bytes += sent_bytes;
217
218         return sent_packets;
219 }
220
221 /* Tx Free Tasklet function */
222 /* Frees Tx buffers for all the tcbs across all Tx objects */
223 /*
224  * Scheduled from sending context, so that
225  * the fat Tx lock is not held for too long
226  * in the sending context.
227  */
228 static void
229 bnad_tx_free_tasklet(unsigned long bnad_ptr)
230 {
231         struct bnad *bnad = (struct bnad *)bnad_ptr;
232         struct bna_tcb *tcb;
233         u32             acked = 0;
234         int                     i, j;
235
236         for (i = 0; i < bnad->num_tx; i++) {
237                 for (j = 0; j < bnad->num_txq_per_tx; j++) {
238                         tcb = bnad->tx_info[i].tcb[j];
239                         if (!tcb)
240                                 continue;
241                         if (((u16) (*tcb->hw_consumer_index) !=
242                                 tcb->consumer_index) &&
243                                 (!test_and_set_bit(BNAD_TXQ_FREE_SENT,
244                                                   &tcb->flags))) {
245                                 acked = bnad_free_txbufs(bnad, tcb);
246                                 if (likely(test_bit(BNAD_TXQ_TX_STARTED,
247                                         &tcb->flags)))
248                                         bna_ib_ack(tcb->i_dbell, acked);
249                                 smp_mb__before_clear_bit();
250                                 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
251                         }
252                         if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED,
253                                                 &tcb->flags)))
254                                 continue;
255                         if (netif_queue_stopped(bnad->netdev)) {
256                                 if (acked && netif_carrier_ok(bnad->netdev) &&
257                                         BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
258                                                 BNAD_NETIF_WAKE_THRESHOLD) {
259                                         netif_wake_queue(bnad->netdev);
260                                         /* TODO */
261                                         /* Counters for individual TxQs? */
262                                         BNAD_UPDATE_CTR(bnad,
263                                                 netif_queue_wakeup);
264                                 }
265                         }
266                 }
267         }
268 }
269
270 static u32
271 bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
272 {
273         struct net_device *netdev = bnad->netdev;
274         u32 sent = 0;
275
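        /* BNAD_TXQ_FREE_SENT serializes Tx buffer freeing with the Tx free tasklet */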
276         if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
277                 return 0;
278
279         sent = bnad_free_txbufs(bnad, tcb);
280         if (sent) {
281                 if (netif_queue_stopped(netdev) &&
282                     netif_carrier_ok(netdev) &&
283                     BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
284                                     BNAD_NETIF_WAKE_THRESHOLD) {
285                         if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
286                                 netif_wake_queue(netdev);
287                                 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
288                         }
289                 }
290         }
291
292         if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
293                 bna_ib_ack(tcb->i_dbell, sent);
294
295         smp_mb__before_clear_bit();
296         clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
297
298         return sent;
299 }
300
301 /* MSIX Tx Completion Handler */
302 static irqreturn_t
303 bnad_msix_tx(int irq, void *data)
304 {
305         struct bna_tcb *tcb = (struct bna_tcb *)data;
306         struct bnad *bnad = tcb->bnad;
307
308         bnad_tx(bnad, tcb);
309
310         return IRQ_HANDLED;
311 }
312
313 static void
314 bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb)
315 {
316         struct bnad_unmap_q *unmap_q = rcb->unmap_q;
317
318         rcb->producer_index = 0;
319         rcb->consumer_index = 0;
320
321         unmap_q->producer_index = 0;
322         unmap_q->consumer_index = 0;
323 }
324
325 static void
326 bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
327 {
328         struct bnad_unmap_q *unmap_q;
329         struct bnad_skb_unmap *unmap_array;
330         struct sk_buff *skb;
331         int unmap_cons;
332
333         unmap_q = rcb->unmap_q;
334         unmap_array = unmap_q->unmap_array;
335         for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) {
336                 skb = unmap_array[unmap_cons].skb;
337                 if (!skb)
338                         continue;
339                 unmap_array[unmap_cons].skb = NULL;
340                 dma_unmap_single(&bnad->pcidev->dev,
341                                  dma_unmap_addr(&unmap_array[unmap_cons],
342                                                 dma_addr),
343                                  rcb->rxq->buffer_size,
344                                  DMA_FROM_DEVICE);
345                 dev_kfree_skb(skb);
346         }
347         bnad_reset_rcb(bnad, rcb);
348 }
349
350 static void
351 bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
352 {
353         u16 to_alloc, alloced, unmap_prod, wi_range;
354         struct bnad_unmap_q *unmap_q = rcb->unmap_q;
355         struct bnad_skb_unmap *unmap_array;
356         struct bna_rxq_entry *rxent;
357         struct sk_buff *skb;
358         dma_addr_t dma_addr;
359
360         alloced = 0;
361         to_alloc =
362                 BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth);
363
364         unmap_array = unmap_q->unmap_array;
365         unmap_prod = unmap_q->producer_index;
366
367         BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent, wi_range);
368
369         while (to_alloc--) {
370                 if (!wi_range)
371                         BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent,
372                                              wi_range);
373                 skb = netdev_alloc_skb_ip_align(bnad->netdev,
374                                                 rcb->rxq->buffer_size);
375                 if (unlikely(!skb)) {
376                         BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
377                         rcb->rxq->rxbuf_alloc_failed++;
378                         goto finishing;
379                 }
380                 unmap_array[unmap_prod].skb = skb;
381                 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
382                                           rcb->rxq->buffer_size,
383                                           DMA_FROM_DEVICE);
384                 dma_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
385                                    dma_addr);
386                 BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
387                 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
388
389                 rxent++;
390                 wi_range--;
391                 alloced++;
392         }
393
394 finishing:
395         if (likely(alloced)) {
396                 unmap_q->producer_index = unmap_prod;
397                 rcb->producer_index = unmap_prod;
398                 smp_mb();
399                 if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
400                         bna_rxq_prod_indx_doorbell(rcb);
401         }
402 }
403
404 static inline void
405 bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb)
406 {
407         struct bnad_unmap_q *unmap_q = rcb->unmap_q;
408
409         if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
410                 if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
411                          >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
412                         bnad_alloc_n_post_rxbufs(bnad, rcb);
413                 smp_mb__before_clear_bit();
414                 clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
415         }
416 }
417
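/*
 * Rx completion processing: consume up to 'budget' CQ entries,
 * hand the received skbs up the stack, ack the IB with the packet
 * count and refill the RxQ(s).
 */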
418 static u32
419 bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
420 {
421         struct bna_cq_entry *cmpl, *next_cmpl;
422         struct bna_rcb *rcb = NULL;
423         unsigned int wi_range, packets = 0, wis = 0;
424         struct bnad_unmap_q *unmap_q;
425         struct bnad_skb_unmap *unmap_array;
426         struct sk_buff *skb;
427         u32 flags, unmap_cons;
428         struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
429         struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
430
431         set_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);
432
433         if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)) {
434                 clear_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);
435                 return 0;
436         }
437
438         prefetch(bnad->netdev);
439         BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
440                             wi_range);
441         BUG_ON(!(wi_range <= ccb->q_depth));
442         while (cmpl->valid && packets < budget) {
443                 packets++;
444                 BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
445
446                 if (bna_is_small_rxq(cmpl->rxq_id))
447                         rcb = ccb->rcb[1];
448                 else
449                         rcb = ccb->rcb[0];
450
451                 unmap_q = rcb->unmap_q;
452                 unmap_array = unmap_q->unmap_array;
453                 unmap_cons = unmap_q->consumer_index;
454
455                 skb = unmap_array[unmap_cons].skb;
456                 BUG_ON(!(skb));
457                 unmap_array[unmap_cons].skb = NULL;
458                 dma_unmap_single(&bnad->pcidev->dev,
459                                  dma_unmap_addr(&unmap_array[unmap_cons],
460                                                 dma_addr),
461                                  rcb->rxq->buffer_size,
462                                  DMA_FROM_DEVICE);
463                 BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
464
465                 /* Should this be more efficient? Performance? */
466                 BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);
467
468                 wis++;
469                 if (likely(--wi_range))
470                         next_cmpl = cmpl + 1;
471                 else {
472                         BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
473                         wis = 0;
474                         BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt,
475                                                 next_cmpl, wi_range);
476                         BUG_ON(!(wi_range <= ccb->q_depth));
477                 }
478                 prefetch(next_cmpl);
479
480                 flags = ntohl(cmpl->flags);
481                 if (unlikely
482                     (flags &
483                      (BNA_CQ_EF_MAC_ERROR | BNA_CQ_EF_FCS_ERROR |
484                       BNA_CQ_EF_TOO_LONG))) {
485                         dev_kfree_skb_any(skb);
486                         rcb->rxq->rx_packets_with_error++;
487                         goto next;
488                 }
489
490                 skb_put(skb, ntohs(cmpl->length));
491                 if (likely
492                     ((bnad->netdev->features & NETIF_F_RXCSUM) &&
493                      (((flags & BNA_CQ_EF_IPV4) &&
494                       (flags & BNA_CQ_EF_L3_CKSUM_OK)) ||
495                       (flags & BNA_CQ_EF_IPV6)) &&
496                       (flags & (BNA_CQ_EF_TCP | BNA_CQ_EF_UDP)) &&
497                       (flags & BNA_CQ_EF_L4_CKSUM_OK)))
498                         skb->ip_summed = CHECKSUM_UNNECESSARY;
499                 else
500                         skb_checksum_none_assert(skb);
501
502                 rcb->rxq->rx_packets++;
503                 rcb->rxq->rx_bytes += skb->len;
504                 skb->protocol = eth_type_trans(skb, bnad->netdev);
505
506                 if (flags & BNA_CQ_EF_VLAN)
507                         __vlan_hwaccel_put_tag(skb, ntohs(cmpl->vlan_tag));
508
509                 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
510                         napi_gro_receive(&rx_ctrl->napi, skb);
511                 else {
512                         netif_receive_skb(skb);
513                 }
514
515 next:
516                 cmpl->valid = 0;
517                 cmpl = next_cmpl;
518         }
519
520         BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
521
522         if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
523                 bna_ib_ack_disable_irq(ccb->i_dbell, packets);
524
525         bnad_refill_rxq(bnad, ccb->rcb[0]);
526         if (ccb->rcb[1])
527                 bnad_refill_rxq(bnad, ccb->rcb[1]);
528
529         clear_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);
530
531         return packets;
532 }
533
534 static void
535 bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
536 {
537         struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
538         struct napi_struct *napi = &rx_ctrl->napi;
539
540         if (likely(napi_schedule_prep(napi))) {
541                 __napi_schedule(napi);
542                 rx_ctrl->rx_schedule++;
543         }
544 }
545
546 /* MSIX Rx Path Handler */
547 static irqreturn_t
548 bnad_msix_rx(int irq, void *data)
549 {
550         struct bna_ccb *ccb = (struct bna_ccb *)data;
551
552         if (ccb) {
553                 ((struct bnad_rx_ctrl *)(ccb->ctrl))->rx_intr_ctr++;
554                 bnad_netif_rx_schedule_poll(ccb->bnad, ccb);
555         }
556
557         return IRQ_HANDLED;
558 }
559
560 /* Interrupt handlers */
561
562 /* Mbox Interrupt Handlers */
563 static irqreturn_t
564 bnad_msix_mbox_handler(int irq, void *data)
565 {
566         u32 intr_status;
567         unsigned long flags;
568         struct bnad *bnad = (struct bnad *)data;
569
570         spin_lock_irqsave(&bnad->bna_lock, flags);
571         if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
572                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
573                 return IRQ_HANDLED;
574         }
575
576         bna_intr_status_get(&bnad->bna, intr_status);
577
578         if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
579                 bna_mbox_handler(&bnad->bna, intr_status);
580
581         spin_unlock_irqrestore(&bnad->bna_lock, flags);
582
583         return IRQ_HANDLED;
584 }
585
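/*
 * INTx / shared interrupt handler: services the mailbox first, then
 * processes Tx completions and schedules NAPI polls for the Rx paths.
 */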
586 static irqreturn_t
587 bnad_isr(int irq, void *data)
588 {
589         int i, j;
590         u32 intr_status;
591         unsigned long flags;
592         struct bnad *bnad = (struct bnad *)data;
593         struct bnad_rx_info *rx_info;
594         struct bnad_rx_ctrl *rx_ctrl;
595         struct bna_tcb *tcb = NULL;
596
597         spin_lock_irqsave(&bnad->bna_lock, flags);
598         if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
599                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
600                 return IRQ_NONE;
601         }
602
603         bna_intr_status_get(&bnad->bna, intr_status);
604
605         if (unlikely(!intr_status)) {
606                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
607                 return IRQ_NONE;
608         }
609
610         if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
611                 bna_mbox_handler(&bnad->bna, intr_status);
612
613         spin_unlock_irqrestore(&bnad->bna_lock, flags);
614
615         if (!BNA_IS_INTX_DATA_INTR(intr_status))
616                 return IRQ_HANDLED;
617
618         /* Process data interrupts */
619         /* Tx processing */
620         for (i = 0; i < bnad->num_tx; i++) {
621                 for (j = 0; j < bnad->num_txq_per_tx; j++) {
622                         tcb = bnad->tx_info[i].tcb[j];
623                         if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
624                                 bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
625                 }
626         }
627         /* Rx processing */
628         for (i = 0; i < bnad->num_rx; i++) {
629                 rx_info = &bnad->rx_info[i];
630                 if (!rx_info->rx)
631                         continue;
632                 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
633                         rx_ctrl = &rx_info->rx_ctrl[j];
634                         if (rx_ctrl->ccb)
635                                 bnad_netif_rx_schedule_poll(bnad,
636                                                             rx_ctrl->ccb);
637                 }
638         }
639         return IRQ_HANDLED;
640 }
641
642 /*
643  * Called in interrupt / callback context
644  * with bna_lock held, so cfg_flags access is OK
645  */
646 static void
647 bnad_enable_mbox_irq(struct bnad *bnad)
648 {
649         clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
650
651         BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
652 }
653
654 /*
655  * Called with bnad->bna_lock held because of
656  * the bnad->cfg_flags access.
657  */
658 static void
659 bnad_disable_mbox_irq(struct bnad *bnad)
660 {
661         set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
662
663         BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
664 }
665
666 static void
667 bnad_set_netdev_perm_addr(struct bnad *bnad)
668 {
669         struct net_device *netdev = bnad->netdev;
670
671         memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
672         if (is_zero_ether_addr(netdev->dev_addr))
673                 memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
674 }
675
676 /* Control Path Handlers */
677
678 /* Callbacks */
679 void
680 bnad_cb_mbox_intr_enable(struct bnad *bnad)
681 {
682         bnad_enable_mbox_irq(bnad);
683 }
684
685 void
686 bnad_cb_mbox_intr_disable(struct bnad *bnad)
687 {
688         bnad_disable_mbox_irq(bnad);
689 }
690
691 void
692 bnad_cb_ioceth_ready(struct bnad *bnad)
693 {
694         bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
695         complete(&bnad->bnad_completions.ioc_comp);
696 }
697
698 void
699 bnad_cb_ioceth_failed(struct bnad *bnad)
700 {
701         bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL;
702         complete(&bnad->bnad_completions.ioc_comp);
703 }
704
705 void
706 bnad_cb_ioceth_disabled(struct bnad *bnad)
707 {
708         bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
709         complete(&bnad->bnad_completions.ioc_comp);
710 }
711
712 static void
713 bnad_cb_enet_disabled(void *arg)
714 {
715         struct bnad *bnad = (struct bnad *)arg;
716
717         netif_carrier_off(bnad->netdev);
718         complete(&bnad->bnad_completions.enet_comp);
719 }
720
721 void
722 bnad_cb_ethport_link_status(struct bnad *bnad,
723                         enum bna_link_status link_status)
724 {
725         bool link_up = false;
726
727         link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);
728
729         if (link_status == BNA_CEE_UP) {
730                 if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
731                         BNAD_UPDATE_CTR(bnad, cee_toggle);
732                 set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
733         } else {
734                 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
735                         BNAD_UPDATE_CTR(bnad, cee_toggle);
736                 clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
737         }
738
739         if (link_up) {
740                 if (!netif_carrier_ok(bnad->netdev)) {
741                         uint tx_id, tcb_id;
742                         printk(KERN_WARNING "bna: %s link up\n",
743                                 bnad->netdev->name);
744                         netif_carrier_on(bnad->netdev);
745                         BNAD_UPDATE_CTR(bnad, link_toggle);
746                         for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
747                                 for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx;
748                                       tcb_id++) {
749                                         struct bna_tcb *tcb =
750                                         bnad->tx_info[tx_id].tcb[tcb_id];
751                                         u32 txq_id;
752                                         if (!tcb)
753                                                 continue;
754
755                                         txq_id = tcb->id;
756
757                                         if (test_bit(BNAD_TXQ_TX_STARTED,
758                                                      &tcb->flags)) {
759                                                 /*
760                                                  * Force an immediate
761                                                  * Transmit Schedule */
762                                                 printk(KERN_INFO "bna: %s %d "
763                                                       "TXQ_STARTED\n",
764                                                        bnad->netdev->name,
765                                                        txq_id);
766                                                 netif_wake_subqueue(
767                                                                 bnad->netdev,
768                                                                 txq_id);
769                                                 BNAD_UPDATE_CTR(bnad,
770                                                         netif_queue_wakeup);
771                                         } else {
772                                                 netif_stop_subqueue(
773                                                                 bnad->netdev,
774                                                                 txq_id);
775                                                 BNAD_UPDATE_CTR(bnad,
776                                                         netif_queue_stop);
777                                         }
778                                 }
779                         }
780                 }
781         } else {
782                 if (netif_carrier_ok(bnad->netdev)) {
783                         printk(KERN_WARNING "bna: %s link down\n",
784                                 bnad->netdev->name);
785                         netif_carrier_off(bnad->netdev);
786                         BNAD_UPDATE_CTR(bnad, link_toggle);
787                 }
788         }
789 }
790
791 static void
792 bnad_cb_tx_disabled(void *arg, struct bna_tx *tx)
793 {
794         struct bnad *bnad = (struct bnad *)arg;
795
796         complete(&bnad->bnad_completions.tx_comp);
797 }
798
799 static void
800 bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
801 {
802         struct bnad_tx_info *tx_info =
803                         (struct bnad_tx_info *)tcb->txq->tx->priv;
804         struct bnad_unmap_q *unmap_q = tcb->unmap_q;
805
806         tx_info->tcb[tcb->id] = tcb;
807         unmap_q->producer_index = 0;
808         unmap_q->consumer_index = 0;
809         unmap_q->q_depth = BNAD_TX_UNMAPQ_DEPTH;
810 }
811
812 static void
813 bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
814 {
815         struct bnad_tx_info *tx_info =
816                         (struct bnad_tx_info *)tcb->txq->tx->priv;
817         struct bnad_unmap_q *unmap_q = tcb->unmap_q;
818
819         while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
820                 cpu_relax();
821
822         bnad_free_all_txbufs(bnad, tcb);
823
824         unmap_q->producer_index = 0;
825         unmap_q->consumer_index = 0;
826
827         smp_mb__before_clear_bit();
828         clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
829
830         tx_info->tcb[tcb->id] = NULL;
831 }
832
833 static void
834 bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb)
835 {
836         struct bnad_unmap_q *unmap_q = rcb->unmap_q;
837
838         unmap_q->producer_index = 0;
839         unmap_q->consumer_index = 0;
840         unmap_q->q_depth = BNAD_RX_UNMAPQ_DEPTH;
841 }
842
843 static void
844 bnad_cb_rcb_destroy(struct bnad *bnad, struct bna_rcb *rcb)
845 {
846         bnad_free_all_rxbufs(bnad, rcb);
847 }
848
849 static void
850 bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
851 {
852         struct bnad_rx_info *rx_info =
853                         (struct bnad_rx_info *)ccb->cq->rx->priv;
854
855         rx_info->rx_ctrl[ccb->id].ccb = ccb;
856         ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
857 }
858
859 static void
860 bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
861 {
862         struct bnad_rx_info *rx_info =
863                         (struct bnad_rx_info *)ccb->cq->rx->priv;
864
865         rx_info->rx_ctrl[ccb->id].ccb = NULL;
866 }
867
868 static void
869 bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
870 {
871         struct bnad_tx_info *tx_info =
872                         (struct bnad_tx_info *)tx->priv;
873         struct bna_tcb *tcb;
874         u32 txq_id;
875         int i;
876
877         for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
878                 tcb = tx_info->tcb[i];
879                 if (!tcb)
880                         continue;
881                 txq_id = tcb->id;
882                 clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
883                 netif_stop_subqueue(bnad->netdev, txq_id);
884                 printk(KERN_INFO "bna: %s %d TXQ_STOPPED\n",
885                         bnad->netdev->name, txq_id);
886         }
887 }
888
889 static void
890 bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
891 {
892         struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
893         struct bna_tcb *tcb;
894         struct bnad_unmap_q *unmap_q;
895         u32 txq_id;
896         int i;
897
898         for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
899                 tcb = tx_info->tcb[i];
900                 if (!tcb)
901                         continue;
902                 txq_id = tcb->id;
903
904                 unmap_q = tcb->unmap_q;
905
906                 if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
907                         continue;
908
909                 while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
910                         cpu_relax();
911
912                 bnad_free_all_txbufs(bnad, tcb);
913
914                 unmap_q->producer_index = 0;
915                 unmap_q->consumer_index = 0;
916
917                 smp_mb__before_clear_bit();
918                 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
919
920                 set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
921
922                 if (netif_carrier_ok(bnad->netdev)) {
923                         printk(KERN_INFO "bna: %s %d TXQ_STARTED\n",
924                                 bnad->netdev->name, txq_id);
925                         netif_wake_subqueue(bnad->netdev, txq_id);
926                         BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
927                 }
928         }
929
930         /*
931          * Workaround: if the first ioceth enable failed and we
932          * got a zero MAC address, try to fetch the MAC address
933          * again here.
934          */
935         if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) {
936                 bna_enet_perm_mac_get(&bnad->bna.enet, &bnad->perm_addr);
937                 bnad_set_netdev_perm_addr(bnad);
938         }
939 }
940
941 static void
942 bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
943 {
944         struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
945         struct bna_tcb *tcb;
946         int i;
947
948         for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
949                 tcb = tx_info->tcb[i];
950                 if (!tcb)
951                         continue;
952         }
953
954         mdelay(BNAD_TXRX_SYNC_MDELAY);
955         bna_tx_cleanup_complete(tx);
956 }
957
958 static void
959 bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
960 {
961         struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
962         struct bna_ccb *ccb;
963         struct bnad_rx_ctrl *rx_ctrl;
964         int i;
965
966         for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
967                 rx_ctrl = &rx_info->rx_ctrl[i];
968                 ccb = rx_ctrl->ccb;
969                 if (!ccb)
970                         continue;
971
972                 clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[0]->flags);
973
974                 if (ccb->rcb[1])
975                         clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[1]->flags);
976         }
977 }
978
979 static void
980 bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
981 {
982         struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
983         struct bna_ccb *ccb;
984         struct bnad_rx_ctrl *rx_ctrl;
985         int i;
986
987         mdelay(BNAD_TXRX_SYNC_MDELAY);
988
989         for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
990                 rx_ctrl = &rx_info->rx_ctrl[i];
991                 ccb = rx_ctrl->ccb;
992                 if (!ccb)
993                         continue;
994
995                 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
996
997                 if (ccb->rcb[1])
998                         clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
999
1000                 while (test_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags))
1001                         cpu_relax();
1002         }
1003
1004         bna_rx_cleanup_complete(rx);
1005 }
1006
1007 static void
1008 bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
1009 {
1010         struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1011         struct bna_ccb *ccb;
1012         struct bna_rcb *rcb;
1013         struct bnad_rx_ctrl *rx_ctrl;
1014         struct bnad_unmap_q *unmap_q;
1015         int i;
1016         int j;
1017
1018         for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1019                 rx_ctrl = &rx_info->rx_ctrl[i];
1020                 ccb = rx_ctrl->ccb;
1021                 if (!ccb)
1022                         continue;
1023
1024                 bnad_cq_cmpl_init(bnad, ccb);
1025
1026                 for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
1027                         rcb = ccb->rcb[j];
1028                         if (!rcb)
1029                                 continue;
1030                         bnad_free_all_rxbufs(bnad, rcb);
1031
1032                         set_bit(BNAD_RXQ_STARTED, &rcb->flags);
1033                         set_bit(BNAD_RXQ_POST_OK, &rcb->flags);
1034                         unmap_q = rcb->unmap_q;
1035
1036                         /* Now allocate & post buffers for this RCB */
1037                         /* !!Allocation in callback context */
1038                         if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
1039                                 if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
1040                                         >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
1041                                         bnad_alloc_n_post_rxbufs(bnad, rcb);
1042                                 smp_mb__before_clear_bit();
1043                                 clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
1044                         }
1045                 }
1046         }
1047 }
1048
1049 static void
1050 bnad_cb_rx_disabled(void *arg, struct bna_rx *rx)
1051 {
1052         struct bnad *bnad = (struct bnad *)arg;
1053
1054         complete(&bnad->bnad_completions.rx_comp);
1055 }
1056
1057 static void
1058 bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx)
1059 {
1060         bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS;
1061         complete(&bnad->bnad_completions.mcast_comp);
1062 }
1063
1064 void
1065 bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
1066                        struct bna_stats *stats)
1067 {
1068         if (status == BNA_CB_SUCCESS)
1069                 BNAD_UPDATE_CTR(bnad, hw_stats_updates);
1070
1071         if (!netif_running(bnad->netdev) ||
1072                 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1073                 return;
1074
1075         mod_timer(&bnad->stats_timer,
1076                   jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1077 }
1078
1079 static void
1080 bnad_cb_enet_mtu_set(struct bnad *bnad)
1081 {
1082         bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS;
1083         complete(&bnad->bnad_completions.mtu_comp);
1084 }
1085
1086 /* Resource allocation, free functions */
1087
1088 static void
1089 bnad_mem_free(struct bnad *bnad,
1090               struct bna_mem_info *mem_info)
1091 {
1092         int i;
1093         dma_addr_t dma_pa;
1094
1095         if (mem_info->mdl == NULL)
1096                 return;
1097
1098         for (i = 0; i < mem_info->num; i++) {
1099                 if (mem_info->mdl[i].kva != NULL) {
1100                         if (mem_info->mem_type == BNA_MEM_T_DMA) {
1101                                 BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
1102                                                 dma_pa);
1103                                 dma_free_coherent(&bnad->pcidev->dev,
1104                                                   mem_info->mdl[i].len,
1105                                                   mem_info->mdl[i].kva, dma_pa);
1106                         } else
1107                                 kfree(mem_info->mdl[i].kva);
1108                 }
1109         }
1110         kfree(mem_info->mdl);
1111         mem_info->mdl = NULL;
1112 }
1113
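/*
 * Allocates mem_info->num blocks of mem_info->len bytes each, either
 * DMA-coherent or plain kernel memory depending on mem_type.
 */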
1114 static int
1115 bnad_mem_alloc(struct bnad *bnad,
1116                struct bna_mem_info *mem_info)
1117 {
1118         int i;
1119         dma_addr_t dma_pa;
1120
1121         if ((mem_info->num == 0) || (mem_info->len == 0)) {
1122                 mem_info->mdl = NULL;
1123                 return 0;
1124         }
1125
1126         mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
1127                                 GFP_KERNEL);
1128         if (mem_info->mdl == NULL)
1129                 return -ENOMEM;
1130
1131         if (mem_info->mem_type == BNA_MEM_T_DMA) {
1132                 for (i = 0; i < mem_info->num; i++) {
1133                         mem_info->mdl[i].len = mem_info->len;
1134                         mem_info->mdl[i].kva =
1135                                 dma_alloc_coherent(&bnad->pcidev->dev,
1136                                                 mem_info->len, &dma_pa,
1137                                                 GFP_KERNEL);
1138
1139                         if (mem_info->mdl[i].kva == NULL)
1140                                 goto err_return;
1141
1142                         BNA_SET_DMA_ADDR(dma_pa,
1143                                          &(mem_info->mdl[i].dma));
1144                 }
1145         } else {
1146                 for (i = 0; i < mem_info->num; i++) {
1147                         mem_info->mdl[i].len = mem_info->len;
1148                         mem_info->mdl[i].kva = kzalloc(mem_info->len,
1149                                                         GFP_KERNEL);
1150                         if (mem_info->mdl[i].kva == NULL)
1151                                 goto err_return;
1152                 }
1153         }
1154
1155         return 0;
1156
1157 err_return:
1158         bnad_mem_free(bnad, mem_info);
1159         return -ENOMEM;
1160 }
1161
1162 /* Free IRQ for Mailbox */
1163 static void
1164 bnad_mbox_irq_free(struct bnad *bnad)
1165 {
1166         int irq;
1167         unsigned long flags;
1168
1169         spin_lock_irqsave(&bnad->bna_lock, flags);
1170         bnad_disable_mbox_irq(bnad);
1171         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1172
1173         irq = BNAD_GET_MBOX_IRQ(bnad);
1174         free_irq(irq, bnad);
1175 }
1176
1177 /*
1178  * Allocates the IRQ for the Mailbox, but keeps it disabled.
1179  * It will be enabled once we get the mbox enable callback
1180  * from bna.
1181  */
1182 static int
1183 bnad_mbox_irq_alloc(struct bnad *bnad)
1184 {
1185         int             err = 0;
1186         unsigned long   irq_flags, flags;
1187         u32     irq;
1188         irq_handler_t   irq_handler;
1189
1190         spin_lock_irqsave(&bnad->bna_lock, flags);
1191         if (bnad->cfg_flags & BNAD_CF_MSIX) {
1192                 irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
1193                 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
1194                 irq_flags = 0;
1195         } else {
1196                 irq_handler = (irq_handler_t)bnad_isr;
1197                 irq = bnad->pcidev->irq;
1198                 irq_flags = IRQF_SHARED;
1199         }
1200
1201         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1202         sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
1203
1204         /*
1205          * Set the Mbox IRQ disable flag, so that the IRQ handler
1206          * called from request_irq() for SHARED IRQs does not execute
1207          */
1208         set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
1209
1210         BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
1211
1212         err = request_irq(irq, irq_handler, irq_flags,
1213                           bnad->mbox_irq_name, bnad);
1214
1215         return err;
1216 }
1217
1218 static void
1219 bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
1220 {
1221         kfree(intr_info->idl);
1222         intr_info->idl = NULL;
1223 }
1224
1225 /* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
1226 static int
1227 bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
1228                     u32 txrx_id, struct bna_intr_info *intr_info)
1229 {
1230         int i, vector_start = 0;
1231         u32 cfg_flags;
1232         unsigned long flags;
1233
1234         spin_lock_irqsave(&bnad->bna_lock, flags);
1235         cfg_flags = bnad->cfg_flags;
1236         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1237
1238         if (cfg_flags & BNAD_CF_MSIX) {
1239                 intr_info->intr_type = BNA_INTR_T_MSIX;
1240                 intr_info->idl = kcalloc(intr_info->num,
1241                                         sizeof(struct bna_intr_descr),
1242                                         GFP_KERNEL);
1243                 if (!intr_info->idl)
1244                         return -ENOMEM;
1245
1246                 switch (src) {
1247                 case BNAD_INTR_TX:
1248                         vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id;
1249                         break;
1250
1251                 case BNAD_INTR_RX:
1252                         vector_start = BNAD_MAILBOX_MSIX_VECTORS +
1253                                         (bnad->num_tx * bnad->num_txq_per_tx) +
1254                                         txrx_id;
1255                         break;
1256
1257                 default:
1258                         BUG();
1259                 }
1260
1261                 for (i = 0; i < intr_info->num; i++)
1262                         intr_info->idl[i].vector = vector_start + i;
1263         } else {
1264                 intr_info->intr_type = BNA_INTR_T_INTX;
1265                 intr_info->num = 1;
1266                 intr_info->idl = kcalloc(intr_info->num,
1267                                         sizeof(struct bna_intr_descr),
1268                                         GFP_KERNEL);
1269                 if (!intr_info->idl)
1270                         return -ENOMEM;
1271
1272                 switch (src) {
1273                 case BNAD_INTR_TX:
1274                         intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK;
1275                         break;
1276
1277                 case BNAD_INTR_RX:
1278                         intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK;
1279                         break;
1280                 }
1281         }
1282         return 0;
1283 }
1284
1285 /**
1286  * NOTE: Should be called for MSIX only
1287  * Unregisters Tx MSIX vector(s) from the kernel
1288  */
1289 static void
1290 bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
1291                         int num_txqs)
1292 {
1293         int i;
1294         int vector_num;
1295
1296         for (i = 0; i < num_txqs; i++) {
1297                 if (tx_info->tcb[i] == NULL)
1298                         continue;
1299
1300                 vector_num = tx_info->tcb[i]->intr_vector;
1301                 free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
1302         }
1303 }
1304
1305 /**
1306  * NOTE: Should be called for MSIX only
1307  * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
1308  */
1309 static int
1310 bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
1311                         u32 tx_id, int num_txqs)
1312 {
1313         int i;
1314         int err;
1315         int vector_num;
1316
1317         for (i = 0; i < num_txqs; i++) {
1318                 vector_num = tx_info->tcb[i]->intr_vector;
1319                 sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
1320                                 tx_id + tx_info->tcb[i]->id);
1321                 err = request_irq(bnad->msix_table[vector_num].vector,
1322                                   (irq_handler_t)bnad_msix_tx, 0,
1323                                   tx_info->tcb[i]->name,
1324                                   tx_info->tcb[i]);
1325                 if (err)
1326                         goto err_return;
1327         }
1328
1329         return 0;
1330
1331 err_return:
1332         if (i > 0)
1333                 bnad_tx_msix_unregister(bnad, tx_info, i);
1334         return -1;
1335 }
1336
1337 /**
1338  * NOTE: Should be called for MSIX only
1339  * Unregisters Rx MSIX vector(s) from the kernel
1340  */
1341 static void
1342 bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
1343                         int num_rxps)
1344 {
1345         int i;
1346         int vector_num;
1347
1348         for (i = 0; i < num_rxps; i++) {
1349                 if (rx_info->rx_ctrl[i].ccb == NULL)
1350                         continue;
1351
1352                 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1353                 free_irq(bnad->msix_table[vector_num].vector,
1354                          rx_info->rx_ctrl[i].ccb);
1355         }
1356 }
1357
1358 /**
1359  * NOTE: Should be called for MSIX only
1360  * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
1361  */
1362 static int
1363 bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
1364                         u32 rx_id, int num_rxps)
1365 {
1366         int i;
1367         int err;
1368         int vector_num;
1369
1370         for (i = 0; i < num_rxps; i++) {
1371                 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1372                 sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
1373                         bnad->netdev->name,
1374                         rx_id + rx_info->rx_ctrl[i].ccb->id);
1375                 err = request_irq(bnad->msix_table[vector_num].vector,
1376                                   (irq_handler_t)bnad_msix_rx, 0,
1377                                   rx_info->rx_ctrl[i].ccb->name,
1378                                   rx_info->rx_ctrl[i].ccb);
1379                 if (err)
1380                         goto err_return;
1381         }
1382
1383         return 0;
1384
1385 err_return:
1386         if (i > 0)
1387                 bnad_rx_msix_unregister(bnad, rx_info, i);
1388         return -1;
1389 }
1390
1391 /* Free Tx object Resources */
1392 static void
1393 bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1394 {
1395         int i;
1396
1397         for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1398                 if (res_info[i].res_type == BNA_RES_T_MEM)
1399                         bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1400                 else if (res_info[i].res_type == BNA_RES_T_INTR)
1401                         bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1402         }
1403 }
1404
1405 /* Allocates memory and interrupt resources for Tx object */
1406 static int
1407 bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1408                   u32 tx_id)
1409 {
1410         int i, err = 0;
1411
1412         for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1413                 if (res_info[i].res_type == BNA_RES_T_MEM)
1414                         err = bnad_mem_alloc(bnad,
1415                                         &res_info[i].res_u.mem_info);
1416                 else if (res_info[i].res_type == BNA_RES_T_INTR)
1417                         err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
1418                                         &res_info[i].res_u.intr_info);
1419                 if (err)
1420                         goto err_return;
1421         }
1422         return 0;
1423
1424 err_return:
1425         bnad_tx_res_free(bnad, res_info);
1426         return err;
1427 }
1428
1429 /* Free Rx object Resources */
1430 static void
1431 bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1432 {
1433         int i;
1434
1435         for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1436                 if (res_info[i].res_type == BNA_RES_T_MEM)
1437                         bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1438                 else if (res_info[i].res_type == BNA_RES_T_INTR)
1439                         bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1440         }
1441 }
1442
1443 /* Allocates memory and interrupt resources for Rx object */
1444 static int
1445 bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1446                   uint rx_id)
1447 {
1448         int i, err = 0;
1449
1450         /* All memory needs to be allocated before setup_ccbs */
1451         for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1452                 if (res_info[i].res_type == BNA_RES_T_MEM)
1453                         err = bnad_mem_alloc(bnad,
1454                                         &res_info[i].res_u.mem_info);
1455                 else if (res_info[i].res_type == BNA_RES_T_INTR)
1456                         err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
1457                                         &res_info[i].res_u.intr_info);
1458                 if (err)
1459                         goto err_return;
1460         }
1461         return 0;
1462
1463 err_return:
1464         bnad_rx_res_free(bnad, res_info);
1465         return err;
1466 }
1467
1468 /* Timer callbacks */
1469 /* a) IOC timer */
1470 static void
1471 bnad_ioc_timeout(unsigned long data)
1472 {
1473         struct bnad *bnad = (struct bnad *)data;
1474         unsigned long flags;
1475
1476         spin_lock_irqsave(&bnad->bna_lock, flags);
1477         bfa_nw_ioc_timeout((void *) &bnad->bna.ioceth.ioc);
1478         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1479 }
1480
1481 static void
1482 bnad_ioc_hb_check(unsigned long data)
1483 {
1484         struct bnad *bnad = (struct bnad *)data;
1485         unsigned long flags;
1486
1487         spin_lock_irqsave(&bnad->bna_lock, flags);
1488         bfa_nw_ioc_hb_check((void *) &bnad->bna.ioceth.ioc);
1489         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1490 }
1491
1492 static void
1493 bnad_iocpf_timeout(unsigned long data)
1494 {
1495         struct bnad *bnad = (struct bnad *)data;
1496         unsigned long flags;
1497
1498         spin_lock_irqsave(&bnad->bna_lock, flags);
1499         bfa_nw_iocpf_timeout((void *) &bnad->bna.ioceth.ioc);
1500         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1501 }
1502
1503 static void
1504 bnad_iocpf_sem_timeout(unsigned long data)
1505 {
1506         struct bnad *bnad = (struct bnad *)data;
1507         unsigned long flags;
1508
1509         spin_lock_irqsave(&bnad->bna_lock, flags);
1510         bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.ioceth.ioc);
1511         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1512 }
1513
1514 /*
1515  * All timer routines use bnad->bna_lock to protect against
1516  * the following race, which may occur in case of no locking:
1517  *      Time    CPU m   CPU n
1518  *      0       1 = test_bit
1519  *      1                       clear_bit
1520  *      2                       del_timer_sync
1521  *      3       mod_timer
1522  */
1523
1524 /* b) Dynamic Interrupt Moderation Timer */
1525 static void
1526 bnad_dim_timeout(unsigned long data)
1527 {
1528         struct bnad *bnad = (struct bnad *)data;
1529         struct bnad_rx_info *rx_info;
1530         struct bnad_rx_ctrl *rx_ctrl;
1531         int i, j;
1532         unsigned long flags;
1533
1534         if (!netif_carrier_ok(bnad->netdev))
1535                 return;
1536
1537         spin_lock_irqsave(&bnad->bna_lock, flags);
1538         for (i = 0; i < bnad->num_rx; i++) {
1539                 rx_info = &bnad->rx_info[i];
1540                 if (!rx_info->rx)
1541                         continue;
1542                 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
1543                         rx_ctrl = &rx_info->rx_ctrl[j];
1544                         if (!rx_ctrl->ccb)
1545                                 continue;
1546                         bna_rx_dim_update(rx_ctrl->ccb);
1547                 }
1548         }
1549
1550                 /* Check BNAD_RF_DIM_TIMER_RUNNING; this does not eliminate the race above */
1551         if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
1552                 mod_timer(&bnad->dim_timer,
1553                           jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1554         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1555 }
1556
1557 /* c)  Statistics Timer */
1558 static void
1559 bnad_stats_timeout(unsigned long data)
1560 {
1561         struct bnad *bnad = (struct bnad *)data;
1562         unsigned long flags;
1563
1564         if (!netif_running(bnad->netdev) ||
1565                 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1566                 return;
1567
1568         spin_lock_irqsave(&bnad->bna_lock, flags);
1569         bna_hw_stats_get(&bnad->bna);
1570         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1571 }
1572
1573 /*
1574  * Set up timer for DIM
1575  * Called with bnad->bna_lock held
1576  */
1577 void
1578 bnad_dim_timer_start(struct bnad *bnad)
1579 {
1580         if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1581             !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
1582                 setup_timer(&bnad->dim_timer, bnad_dim_timeout,
1583                             (unsigned long)bnad);
1584                 set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1585                 mod_timer(&bnad->dim_timer,
1586                           jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1587         }
1588 }
1589
1590 /*
1591  * Set up timer for statistics
1592  * Called with mutex_lock(&bnad->conf_mutex) held
1593  */
1594 static void
1595 bnad_stats_timer_start(struct bnad *bnad)
1596 {
1597         unsigned long flags;
1598
1599         spin_lock_irqsave(&bnad->bna_lock, flags);
1600         if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
1601                 setup_timer(&bnad->stats_timer, bnad_stats_timeout,
1602                             (unsigned long)bnad);
1603                 mod_timer(&bnad->stats_timer,
1604                           jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1605         }
1606         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1607 }
1608
1609 /*
1610  * Stops the stats timer
1611  * Called with mutex_lock(&bnad->conf_mutex) held
1612  */
1613 static void
1614 bnad_stats_timer_stop(struct bnad *bnad)
1615 {
1616         int to_del = 0;
1617         unsigned long flags;
1618
1619         spin_lock_irqsave(&bnad->bna_lock, flags);
1620         if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1621                 to_del = 1;
1622         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1623         if (to_del)
1624                 del_timer_sync(&bnad->stats_timer);
1625 }
1626
1627 /* Utilities */
1628
1629 static void
1630 bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
1631 {
1632         int i = 1; /* Index 0 has broadcast address */
1633         struct netdev_hw_addr *mc_addr;
1634
1635         netdev_for_each_mc_addr(mc_addr, netdev) {
1636                 memcpy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0],
1637                                                         ETH_ALEN);
1638                 i++;
1639         }
1640 }
1641
1642 static int
1643 bnad_napi_poll_rx(struct napi_struct *napi, int budget)
1644 {
1645         struct bnad_rx_ctrl *rx_ctrl =
1646                 container_of(napi, struct bnad_rx_ctrl, napi);
1647         struct bnad *bnad = rx_ctrl->bnad;
1648         int rcvd = 0;
1649
1650         rx_ctrl->rx_poll_ctr++;
1651
1652         if (!netif_carrier_ok(bnad->netdev))
1653                 goto poll_exit;
1654
1655         rcvd = bnad_poll_cq(bnad, rx_ctrl->ccb, budget);
1656         if (rcvd >= budget)
1657                 return rcvd;
1658
1659 poll_exit:
1660         napi_complete(napi);
1661
1662         rx_ctrl->rx_complete++;
1663
1664         if (rx_ctrl->ccb)
1665                 bnad_enable_rx_irq_unsafe(rx_ctrl->ccb);
1666
1667         return rcvd;
1668 }
1669
1670 #define BNAD_NAPI_POLL_QUOTA            64
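/*
 * NAPI weight: at most BNAD_NAPI_POLL_QUOTA Rx completions are
 * processed per bnad_napi_poll_rx() invocation.
 */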
1671 static void
1672 bnad_napi_init(struct bnad *bnad, u32 rx_id)
1673 {
1674         struct bnad_rx_ctrl *rx_ctrl;
1675         int i;
1676
1677         /* Initialize NAPI; enabled later via bnad_napi_enable() */
1678         for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1679                 rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
1680                 netif_napi_add(bnad->netdev, &rx_ctrl->napi,
1681                                bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA);
1682         }
1683 }
1684
1685 static void
1686 bnad_napi_enable(struct bnad *bnad, u32 rx_id)
1687 {
1688         struct bnad_rx_ctrl *rx_ctrl;
1689         int i;
1690
1691         /* Enable NAPI */
1692         for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1693                 rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
1694
1695                 napi_enable(&rx_ctrl->napi);
1696         }
1697 }
1698
1699 static void
1700 bnad_napi_disable(struct bnad *bnad, u32 rx_id)
1701 {
1702         int i;
1703
1704         /* First disable and then clean up */
1705         for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1706                 napi_disable(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1707                 netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1708         }
1709 }
1710
1711 /* Should be called with conf_lock held */
1712 void
1713 bnad_cleanup_tx(struct bnad *bnad, u32 tx_id)
1714 {
1715         struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1716         struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1717         unsigned long flags;
1718
1719         if (!tx_info->tx)
1720                 return;
1721
1722         init_completion(&bnad->bnad_completions.tx_comp);
1723         spin_lock_irqsave(&bnad->bna_lock, flags);
1724         bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
1725         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1726         wait_for_completion(&bnad->bnad_completions.tx_comp);
1727
1728         if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
1729                 bnad_tx_msix_unregister(bnad, tx_info,
1730                         bnad->num_txq_per_tx);
1731
1732         if (0 == tx_id)
1733                 tasklet_kill(&bnad->tx_free_tasklet);
1734
1735         spin_lock_irqsave(&bnad->bna_lock, flags);
1736         bna_tx_destroy(tx_info->tx);
1737         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1738
1739         tx_info->tx = NULL;
1740         tx_info->tx_id = 0;
1741
1742         bnad_tx_res_free(bnad, res_info);
1743 }
1744
1745 /* Should be called with conf_lock held */
1746 int
1747 bnad_setup_tx(struct bnad *bnad, u32 tx_id)
1748 {
1749         int err;
1750         struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1751         struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1752         struct bna_intr_info *intr_info =
1753                         &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
1754         struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
1755         static const struct bna_tx_event_cbfn tx_cbfn = {
1756                 .tcb_setup_cbfn = bnad_cb_tcb_setup,
1757                 .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
1758                 .tx_stall_cbfn = bnad_cb_tx_stall,
1759                 .tx_resume_cbfn = bnad_cb_tx_resume,
1760                 .tx_cleanup_cbfn = bnad_cb_tx_cleanup,
1761         };
1762
1763         struct bna_tx *tx;
1764         unsigned long flags;
1765
1766         tx_info->tx_id = tx_id;
1767
1768         /* Initialize the Tx object configuration */
1769         tx_config->num_txq = bnad->num_txq_per_tx;
1770         tx_config->txq_depth = bnad->txq_depth;
1771         tx_config->tx_type = BNA_TX_T_REGULAR;
1772         tx_config->coalescing_timeo = bnad->tx_coalescing_timeo;
1773
1774         /* Get BNA's resource requirement for one tx object */
1775         spin_lock_irqsave(&bnad->bna_lock, flags);
1776         bna_tx_res_req(bnad->num_txq_per_tx,
1777                 bnad->txq_depth, res_info);
1778         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1779
1780         /* Fill Unmap Q memory requirements */
1781         BNAD_FILL_UNMAPQ_MEM_REQ(
1782                         &res_info[BNA_TX_RES_MEM_T_UNMAPQ],
1783                         bnad->num_txq_per_tx,
1784                         BNAD_TX_UNMAPQ_DEPTH);
1785
1786         /* Allocate resources */
1787         err = bnad_tx_res_alloc(bnad, res_info, tx_id);
1788         if (err)
1789                 return err;
1790
1791         /* Ask BNA to create one Tx object, supplying required resources */
1792         spin_lock_irqsave(&bnad->bna_lock, flags);
1793         tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
1794                         tx_info);
1795         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1796         if (!tx)
1797                 goto err_return;
1798         tx_info->tx = tx;
1799
1800         /* Register ISR for the Tx object */
1801         if (intr_info->intr_type == BNA_INTR_T_MSIX) {
1802                 err = bnad_tx_msix_register(bnad, tx_info,
1803                         tx_id, bnad->num_txq_per_tx);
1804                 if (err)
1805                         goto err_return;
1806         }
1807
1808         spin_lock_irqsave(&bnad->bna_lock, flags);
1809         bna_tx_enable(tx);
1810         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1811
1812         return 0;
1813
1814 err_return:
1815         bnad_tx_res_free(bnad, res_info);
1816         return err;
1817 }
1818
1819 /* Setup the rx config for bna_rx_create */
1820 /* bnad decides the configuration */
1821 static void
1822 bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
1823 {
1824         rx_config->rx_type = BNA_RX_T_REGULAR;
1825         rx_config->num_paths = bnad->num_rxp_per_rx;
1826         rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;
1827
1828         if (bnad->num_rxp_per_rx > 1) {
1829                 rx_config->rss_status = BNA_STATUS_T_ENABLED;
1830                 rx_config->rss_config.hash_type =
1831                                 (BFI_ENET_RSS_IPV6 |
1832                                  BFI_ENET_RSS_IPV6_TCP |
1833                                  BFI_ENET_RSS_IPV4 |
1834                                  BFI_ENET_RSS_IPV4_TCP);
1835                 rx_config->rss_config.hash_mask =
1836                                 bnad->num_rxp_per_rx - 1;
1837                 get_random_bytes(rx_config->rss_config.toeplitz_hash_key,
1838                         sizeof(rx_config->rss_config.toeplitz_hash_key));
1839         } else {
1840                 rx_config->rss_status = BNA_STATUS_T_DISABLED;
1841                 memset(&rx_config->rss_config, 0,
1842                        sizeof(rx_config->rss_config));
1843         }
1844         rx_config->rxp_type = BNA_RXP_SLR;
1845         rx_config->q_depth = bnad->rxq_depth;
1846
1847         rx_config->small_buff_size = BFI_SMALL_RXBUF_SIZE;
1848
1849         rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
1850 }
1851
1852 static void
1853 bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
1854 {
1855         struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1856         int i;
1857
1858         for (i = 0; i < bnad->num_rxp_per_rx; i++)
1859                 rx_info->rx_ctrl[i].bnad = bnad;
1860 }
1861
1862 /* Called with mutex_lock(&bnad->conf_mutex) held */
1863 void
1864 bnad_cleanup_rx(struct bnad *bnad, u32 rx_id)
1865 {
1866         struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1867         struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
1868         struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
1869         unsigned long flags;
1870         int to_del = 0;
1871
1872         if (!rx_info->rx)
1873                 return;
1874
1875         if (0 == rx_id) {
1876                 spin_lock_irqsave(&bnad->bna_lock, flags);
1877                 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1878                     test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
1879                         clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1880                         to_del = 1;
1881                 }
1882                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1883                 if (to_del)
1884                         del_timer_sync(&bnad->dim_timer);
1885         }
1886
1887         init_completion(&bnad->bnad_completions.rx_comp);
1888         spin_lock_irqsave(&bnad->bna_lock, flags);
1889         bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
1890         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1891         wait_for_completion(&bnad->bnad_completions.rx_comp);
1892
1893         if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
1894                 bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
1895
1896         bnad_napi_disable(bnad, rx_id);
1897
1898         spin_lock_irqsave(&bnad->bna_lock, flags);
1899         bna_rx_destroy(rx_info->rx);
1900
1901         rx_info->rx = NULL;
1902         rx_info->rx_id = 0;
1903         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1904
1905         bnad_rx_res_free(bnad, res_info);
1906 }
1907
1908 /* Called with mutex_lock(&bnad->conf_mutex) held */
1909 int
1910 bnad_setup_rx(struct bnad *bnad, u32 rx_id)
1911 {
1912         int err;
1913         struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1914         struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
1915         struct bna_intr_info *intr_info =
1916                         &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
1917         struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
1918         static const struct bna_rx_event_cbfn rx_cbfn = {
1919                 .rcb_setup_cbfn = bnad_cb_rcb_setup,
1920                 .rcb_destroy_cbfn = bnad_cb_rcb_destroy,
1921                 .ccb_setup_cbfn = bnad_cb_ccb_setup,
1922                 .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
1923                 .rx_stall_cbfn = bnad_cb_rx_stall,
1924                 .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
1925                 .rx_post_cbfn = bnad_cb_rx_post,
1926         };
1927         struct bna_rx *rx;
1928         unsigned long flags;
1929
1930         rx_info->rx_id = rx_id;
1931
1932         /* Initialize the Rx object configuration */
1933         bnad_init_rx_config(bnad, rx_config);
1934
1935         /* Get BNA's resource requirement for one Rx object */
1936         spin_lock_irqsave(&bnad->bna_lock, flags);
1937         bna_rx_res_req(rx_config, res_info);
1938         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1939
1940         /* Fill Unmap Q memory requirements */
1941         BNAD_FILL_UNMAPQ_MEM_REQ(
1942                         &res_info[BNA_RX_RES_MEM_T_UNMAPQ],
1943                         rx_config->num_paths +
1944                         ((rx_config->rxp_type == BNA_RXP_SINGLE) ? 0 :
1945                                 rx_config->num_paths), BNAD_RX_UNMAPQ_DEPTH);
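        /*
         * Note (illustrative): with the BNA_RXP_SLR setting used by
         * bnad_init_rx_config() above, each Rx path carries two RxQs
         * (large- and small-buffer, matching bnad_rxqs_per_cq == 2),
         * which is why num_paths is doubled for non-SINGLE RxP types.
         */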
1946
1947         /* Allocate resource */
1948         err = bnad_rx_res_alloc(bnad, res_info, rx_id);
1949         if (err)
1950                 return err;
1951
1952         bnad_rx_ctrl_init(bnad, rx_id);
1953
1954         /* Ask BNA to create one Rx object, supplying required resources */
1955         spin_lock_irqsave(&bnad->bna_lock, flags);
1956         rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
1957                         rx_info);
1958         if (!rx) {
1959                 err = -ENOMEM;
1960                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1961                 goto err_return;
1962         }
1963         rx_info->rx = rx;
1964         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1965
1966         /*
1967          * Init NAPI, which leaves its state set to NAPI_STATE_SCHED,
1968          * so the IRQ handler cannot schedule NAPI at this point.
1969          */
1970         bnad_napi_init(bnad, rx_id);
1971
1972         /* Register ISR for the Rx object */
1973         if (intr_info->intr_type == BNA_INTR_T_MSIX) {
1974                 err = bnad_rx_msix_register(bnad, rx_info, rx_id,
1975                                                 rx_config->num_paths);
1976                 if (err)
1977                         goto err_return;
1978         }
1979
1980         spin_lock_irqsave(&bnad->bna_lock, flags);
1981         if (0 == rx_id) {
1982                 /* Set up Dynamic Interrupt Moderation Vector */
1983                 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
1984                         bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
1985
1986                 /* Enable VLAN filtering only on the default Rx */
1987                 bna_rx_vlanfilter_enable(rx);
1988
1989                 /* Start the DIM timer */
1990                 bnad_dim_timer_start(bnad);
1991         }
1992
1993         bna_rx_enable(rx);
1994         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1995
1996         /* Enable scheduling of NAPI */
1997         bnad_napi_enable(bnad, rx_id);
1998
1999         return 0;
2000
2001 err_return:
2002         bnad_cleanup_rx(bnad, rx_id);
2003         return err;
2004 }
2005
2006 /* Called with conf_lock & bnad->bna_lock held */
2007 void
2008 bnad_tx_coalescing_timeo_set(struct bnad *bnad)
2009 {
2010         struct bnad_tx_info *tx_info;
2011
2012         tx_info = &bnad->tx_info[0];
2013         if (!tx_info->tx)
2014                 return;
2015
2016         bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
2017 }
2018
2019 /* Called with conf_lock & bnad->bna_lock held */
2020 void
2021 bnad_rx_coalescing_timeo_set(struct bnad *bnad)
2022 {
2023         struct bnad_rx_info *rx_info;
2024         int     i;
2025
2026         for (i = 0; i < bnad->num_rx; i++) {
2027                 rx_info = &bnad->rx_info[i];
2028                 if (!rx_info->rx)
2029                         continue;
2030                 bna_rx_coalescing_timeo_set(rx_info->rx,
2031                                 bnad->rx_coalescing_timeo);
2032         }
2033 }
2034
2035 /*
2036  * Called with bnad->bna_lock held
2037  */
2038 int
2039 bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
2040 {
2041         int ret;
2042
2043         if (!is_valid_ether_addr(mac_addr))
2044                 return -EADDRNOTAVAIL;
2045
2046         /* If datapath is down, pretend everything went through */
2047         if (!bnad->rx_info[0].rx)
2048                 return 0;
2049
2050         ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL);
2051         if (ret != BNA_CB_SUCCESS)
2052                 return -EADDRNOTAVAIL;
2053
2054         return 0;
2055 }
2056
2057 /* Should be called with conf_lock held */
2058 int
2059 bnad_enable_default_bcast(struct bnad *bnad)
2060 {
2061         struct bnad_rx_info *rx_info = &bnad->rx_info[0];
2062         int ret;
2063         unsigned long flags;
2064
2065         init_completion(&bnad->bnad_completions.mcast_comp);
2066
2067         spin_lock_irqsave(&bnad->bna_lock, flags);
2068         ret = bna_rx_mcast_add(rx_info->rx, (u8 *)bnad_bcast_addr,
2069                                 bnad_cb_rx_mcast_add);
2070         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2071
2072         if (ret == BNA_CB_SUCCESS)
2073                 wait_for_completion(&bnad->bnad_completions.mcast_comp);
2074         else
2075                 return -ENODEV;
2076
2077         if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
2078                 return -ENODEV;
2079
2080         return 0;
2081 }
2082
2083 /* Called with mutex_lock(&bnad->conf_mutex) held */
2084 void
2085 bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
2086 {
2087         u16 vid;
2088         unsigned long flags;
2089
2090         for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
2091                 spin_lock_irqsave(&bnad->bna_lock, flags);
2092                 bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid);
2093                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2094         }
2095 }
2096
2097 /* Statistics utilities */
2098 void
2099 bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2100 {
2101         int i, j;
2102
2103         for (i = 0; i < bnad->num_rx; i++) {
2104                 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
2105                         if (bnad->rx_info[i].rx_ctrl[j].ccb) {
2106                                 stats->rx_packets += bnad->rx_info[i].
2107                                 rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
2108                                 stats->rx_bytes += bnad->rx_info[i].
2109                                         rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
2110                                 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
2111                                         bnad->rx_info[i].rx_ctrl[j].ccb->
2112                                         rcb[1]->rxq) {
2113                                         stats->rx_packets +=
2114                                                 bnad->rx_info[i].rx_ctrl[j].
2115                                                 ccb->rcb[1]->rxq->rx_packets;
2116                                         stats->rx_bytes +=
2117                                                 bnad->rx_info[i].rx_ctrl[j].
2118                                                 ccb->rcb[1]->rxq->rx_bytes;
2119                                 }
2120                         }
2121                 }
2122         }
2123         for (i = 0; i < bnad->num_tx; i++) {
2124                 for (j = 0; j < bnad->num_txq_per_tx; j++) {
2125                         if (bnad->tx_info[i].tcb[j]) {
2126                                 stats->tx_packets +=
2127                                 bnad->tx_info[i].tcb[j]->txq->tx_packets;
2128                                 stats->tx_bytes +=
2129                                         bnad->tx_info[i].tcb[j]->txq->tx_bytes;
2130                         }
2131                 }
2132         }
2133 }
2134
2135 /*
2136  * Must be called with the bna_lock held.
2137  */
2138 void
2139 bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2140 {
2141         struct bfi_enet_stats_mac *mac_stats;
2142         u32 bmap;
2143         int i;
2144
2145         mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats;
2146         stats->rx_errors =
2147                 mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
2148                 mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
2149                 mac_stats->rx_undersize;
2150         stats->tx_errors = mac_stats->tx_fcs_error +
2151                                         mac_stats->tx_undersize;
2152         stats->rx_dropped = mac_stats->rx_drop;
2153         stats->tx_dropped = mac_stats->tx_drop;
2154         stats->multicast = mac_stats->rx_multicast;
2155         stats->collisions = mac_stats->tx_total_collision;
2156
2157         stats->rx_length_errors = mac_stats->rx_frame_length_error;
2158
2159         /* receive ring buffer overflow  ?? */
2160
2161         stats->rx_crc_errors = mac_stats->rx_fcs_error;
2162         stats->rx_frame_errors = mac_stats->rx_alignment_error;
2163         /* receiver FIFO overrun */
2164         bmap = bna_rx_rid_mask(&bnad->bna);
2165         for (i = 0; bmap; i++) {
2166                 if (bmap & 1) {
2167                         stats->rx_fifo_errors +=
2168                                 bnad->stats.bna_stats->
2169                                         hw_stats.rxf_stats[i].frame_drops;
2170                         break;
2171                 }
2172                 bmap >>= 1;
2173         }
2174 }
2175
2176 static void
2177 bnad_mbox_irq_sync(struct bnad *bnad)
2178 {
2179         u32 irq;
2180         unsigned long flags;
2181
2182         spin_lock_irqsave(&bnad->bna_lock, flags);
2183         if (bnad->cfg_flags & BNAD_CF_MSIX)
2184                 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
2185         else
2186                 irq = bnad->pcidev->irq;
2187         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2188
2189         synchronize_irq(irq);
2190 }
2191
2192 /* Utility used by bnad_start_xmit, for doing TSO */
2193 static int
2194 bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
2195 {
2196         int err;
2197
2198         if (skb_header_cloned(skb)) {
2199                 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2200                 if (err) {
2201                         BNAD_UPDATE_CTR(bnad, tso_err);
2202                         return err;
2203                 }
2204         }
2205
2206         /*
2207          * For TSO, the TCP checksum field is seeded with pseudo-header sum
2208          * excluding the length field.
2209          */
2210         if (skb->protocol == htons(ETH_P_IP)) {
2211                 struct iphdr *iph = ip_hdr(skb);
2212
2213                 /* Do we really need these? */
2214                 iph->tot_len = 0;
2215                 iph->check = 0;
2216
2217                 tcp_hdr(skb)->check =
2218                         ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
2219                                            IPPROTO_TCP, 0);
2220                 BNAD_UPDATE_CTR(bnad, tso4);
2221         } else {
2222                 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
2223
2224                 ipv6h->payload_len = 0;
2225                 tcp_hdr(skb)->check =
2226                         ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
2227                                          IPPROTO_TCP, 0);
2228                 BNAD_UPDATE_CTR(bnad, tso6);
2229         }
2230
2231         return 0;
2232 }
2233
2234 /*
2235  * Initialize Q numbers depending on Rx Paths
2236  * Called with bnad->bna_lock held, because of cfg_flags
2237  * access.
2238  */
2239 static void
2240 bnad_q_num_init(struct bnad *bnad)
2241 {
2242         int rxps;
2243
2244         rxps = min((uint)num_online_cpus(),
2245                         (uint)(BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX));
2246
2247         if (!(bnad->cfg_flags & BNAD_CF_MSIX))
2248                 rxps = 1;       /* INTx */
2249
2250         bnad->num_rx = 1;
2251         bnad->num_tx = 1;
2252         bnad->num_rxp_per_rx = rxps;
2253         bnad->num_txq_per_tx = BNAD_TXQ_NUM;
2254 }
2255
2256 /*
2257  * Adjusts the Q numbers, given the number of MSI-X vectors.
2258  * Gives preference to RSS over Tx priority queues; in that
2259  * case just one TxQ is used.
2260  * Called with bnad->bna_lock held because of cfg_flags access.
2261  */
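/*
 * Worked example (sketch, assuming BNAD_MAILBOX_MSIX_VECTORS == 1 and a
 * single Tx object with one TxQ): given 8 MSI-X vectors and BNAD_CF_MSIX
 * set, num_rxp_per_rx = 8 - (1 * 1) - 1 = 6, i.e. the leftover vectors
 * all go to Rx paths for RSS.
 */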
2262 static void
2263 bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp)
2264 {
2265         bnad->num_txq_per_tx = 1;
2266         if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx)  +
2267              bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
2268             (bnad->cfg_flags & BNAD_CF_MSIX)) {
2269                 bnad->num_rxp_per_rx = msix_vectors -
2270                         (bnad->num_tx * bnad->num_txq_per_tx) -
2271                         BNAD_MAILBOX_MSIX_VECTORS;
2272         } else
2273                 bnad->num_rxp_per_rx = 1;
2274 }
2275
2276 /* Enable / disable ioceth */
2277 static int
2278 bnad_ioceth_disable(struct bnad *bnad)
2279 {
2280         unsigned long flags;
2281         int err = 0;
2282
2283         spin_lock_irqsave(&bnad->bna_lock, flags);
2284         init_completion(&bnad->bnad_completions.ioc_comp);
2285         bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP);
2286         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2287
2288         wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2289                 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2290
2291         err = bnad->bnad_completions.ioc_comp_status;
2292         return err;
2293 }
2294
2295 static int
2296 bnad_ioceth_enable(struct bnad *bnad)
2297 {
2298         int err = 0;
2299         unsigned long flags;
2300
2301         spin_lock_irqsave(&bnad->bna_lock, flags);
2302         init_completion(&bnad->bnad_completions.ioc_comp);
2303         bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING;
2304         bna_ioceth_enable(&bnad->bna.ioceth);
2305         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2306
2307         wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2308                 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2309
2310         err = bnad->bnad_completions.ioc_comp_status;
2311
2312         return err;
2313 }
2314
2315 /* Free BNA resources */
2316 static void
2317 bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info,
2318                 u32 res_val_max)
2319 {
2320         int i;
2321
2322         for (i = 0; i < res_val_max; i++)
2323                 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
2324 }
2325
2326 /* Allocates memory resources for BNA */
2327 static int
2328 bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
2329                 u32 res_val_max)
2330 {
2331         int i, err;
2332
2333         for (i = 0; i < res_val_max; i++) {
2334                 err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
2335                 if (err)
2336                         goto err_return;
2337         }
2338         return 0;
2339
2340 err_return:
2341         bnad_res_free(bnad, res_info, res_val_max);
2342         return err;
2343 }
2344
2345 /* Interrupt enable / disable */
2346 static void
2347 bnad_enable_msix(struct bnad *bnad)
2348 {
2349         int i, ret;
2350         unsigned long flags;
2351
2352         spin_lock_irqsave(&bnad->bna_lock, flags);
2353         if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2354                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2355                 return;
2356         }
2357         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2358
2359         if (bnad->msix_table)
2360                 return;
2361
2362         bnad->msix_table =
2363                 kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
2364
2365         if (!bnad->msix_table)
2366                 goto intx_mode;
2367
2368         for (i = 0; i < bnad->msix_num; i++)
2369                 bnad->msix_table[i].entry = i;
2370
2371         ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
2372         if (ret > 0) {
2373                 /* Not enough MSI-X vectors. */
2374                 pr_warn("BNA: %d MSI-X vectors allocated < %d requested\n",
2375                         ret, bnad->msix_num);
2376
2377                 spin_lock_irqsave(&bnad->bna_lock, flags);
2378                 /* ret = #of vectors that we got */
2379                 bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2,
2380                         (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2);
2381                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2382
2383                 bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
2384                          BNAD_MAILBOX_MSIX_VECTORS;
2385
2386                 if (bnad->msix_num > ret)
2387                         goto intx_mode;
2388
2389                 /* Try once more with adjusted numbers */
2390                 /* If this fails, fall back to INTx */
2391                 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
2392                                       bnad->msix_num);
2393                 if (ret)
2394                         goto intx_mode;
2395
2396         } else if (ret < 0)
2397                 goto intx_mode;
2398
2399         pci_intx(bnad->pcidev, 0);
2400
2401         return;
2402
2403 intx_mode:
2404         pr_warn("BNA: MSI-X enable failed - operating in INTx mode\n");
2405
2406         kfree(bnad->msix_table);
2407         bnad->msix_table = NULL;
2408         bnad->msix_num = 0;
2409         spin_lock_irqsave(&bnad->bna_lock, flags);
2410         bnad->cfg_flags &= ~BNAD_CF_MSIX;
2411         bnad_q_num_init(bnad);
2412         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2413 }
2414
2415 static void
2416 bnad_disable_msix(struct bnad *bnad)
2417 {
2418         u32 cfg_flags;
2419         unsigned long flags;
2420
2421         spin_lock_irqsave(&bnad->bna_lock, flags);
2422         cfg_flags = bnad->cfg_flags;
2423         if (bnad->cfg_flags & BNAD_CF_MSIX)
2424                 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2425         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2426
2427         if (cfg_flags & BNAD_CF_MSIX) {
2428                 pci_disable_msix(bnad->pcidev);
2429                 kfree(bnad->msix_table);
2430                 bnad->msix_table = NULL;
2431         }
2432 }
2433
2434 /* Netdev entry points */
2435 static int
2436 bnad_open(struct net_device *netdev)
2437 {
2438         int err;
2439         struct bnad *bnad = netdev_priv(netdev);
2440         struct bna_pause_config pause_config;
2441         int mtu;
2442         unsigned long flags;
2443
2444         mutex_lock(&bnad->conf_mutex);
2445
2446         /* Tx */
2447         err = bnad_setup_tx(bnad, 0);
2448         if (err)
2449                 goto err_return;
2450
2451         /* Rx */
2452         err = bnad_setup_rx(bnad, 0);
2453         if (err)
2454                 goto cleanup_tx;
2455
2456         /* Port */
2457         pause_config.tx_pause = 0;
2458         pause_config.rx_pause = 0;
2459
2460         mtu = ETH_HLEN + VLAN_HLEN + bnad->netdev->mtu + ETH_FCS_LEN;
2461
2462         spin_lock_irqsave(&bnad->bna_lock, flags);
2463         bna_enet_mtu_set(&bnad->bna.enet, mtu, NULL);
2464         bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
2465         bna_enet_enable(&bnad->bna.enet);
2466         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2467
2468         /* Enable broadcast */
2469         bnad_enable_default_bcast(bnad);
2470
2471         /* Restore VLANs, if any */
2472         bnad_restore_vlans(bnad, 0);
2473
2474         /* Set the UCAST address */
2475         spin_lock_irqsave(&bnad->bna_lock, flags);
2476         bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2477         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2478
2479         /* Start the stats timer */
2480         bnad_stats_timer_start(bnad);
2481
2482         mutex_unlock(&bnad->conf_mutex);
2483
2484         return 0;
2485
2486 cleanup_tx:
2487         bnad_cleanup_tx(bnad, 0);
2488
2489 err_return:
2490         mutex_unlock(&bnad->conf_mutex);
2491         return err;
2492 }
2493
2494 static int
2495 bnad_stop(struct net_device *netdev)
2496 {
2497         struct bnad *bnad = netdev_priv(netdev);
2498         unsigned long flags;
2499
2500         mutex_lock(&bnad->conf_mutex);
2501
2502         /* Stop the stats timer */
2503         bnad_stats_timer_stop(bnad);
2504
2505         init_completion(&bnad->bnad_completions.enet_comp);
2506
2507         spin_lock_irqsave(&bnad->bna_lock, flags);
2508         bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP,
2509                         bnad_cb_enet_disabled);
2510         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2511
2512         wait_for_completion(&bnad->bnad_completions.enet_comp);
2513
2514         bnad_cleanup_tx(bnad, 0);
2515         bnad_cleanup_rx(bnad, 0);
2516
2517         /* Synchronize mailbox IRQ */
2518         bnad_mbox_irq_sync(bnad);
2519
2520         mutex_unlock(&bnad->conf_mutex);
2521
2522         return 0;
2523 }
2524
2525 /* TX */
2526 /*
2527  * bnad_start_xmit : Netdev entry point for Transmit
2528  *                   Called under lock held by net_device
2529  */
2530 static netdev_tx_t
2531 bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2532 {
2533         struct bnad *bnad = netdev_priv(netdev);
2534         u32 txq_id = 0;
2535         struct bna_tcb *tcb = bnad->tx_info[0].tcb[txq_id];
2536
2537         u16             txq_prod, vlan_tag = 0;
2538         u32             unmap_prod, wis, wis_used, wi_range;
2539         u32             vectors, vect_id, i, acked;
2540         int                     err;
2541         unsigned int            len;
2542         u32                             gso_size;
2543
2544         struct bnad_unmap_q *unmap_q = tcb->unmap_q;
2545         dma_addr_t              dma_addr;
2546         struct bna_txq_entry *txqent;
2547         u16     flags;
2548
2549         if (unlikely(skb->len <= ETH_HLEN)) {
2550                 dev_kfree_skb(skb);
2551                 BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
2552                 return NETDEV_TX_OK;
2553         }
2554         if (unlikely(skb_headlen(skb) > BFI_TX_MAX_DATA_PER_VECTOR)) {
2555                 dev_kfree_skb(skb);
2556                 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_too_long);
2557                 return NETDEV_TX_OK;
2558         }
2559         if (unlikely(skb_headlen(skb) == 0)) {
2560                 dev_kfree_skb(skb);
2561                 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2562                 return NETDEV_TX_OK;
2563         }
2564
2565         /*
2566          * Takes care of the Tx that is scheduled between clearing the flag
2567          * and the netif_tx_stop_all_queues() call.
2568          */
2569         if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
2570                 dev_kfree_skb(skb);
2571                 BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
2572                 return NETDEV_TX_OK;
2573         }
2574
2575         vectors = 1 + skb_shinfo(skb)->nr_frags;
2576         if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
2577                 dev_kfree_skb(skb);
2578                 BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
2579                 return NETDEV_TX_OK;
2580         }
2581         wis = BNA_TXQ_WI_NEEDED(vectors);       /* 4 vectors per work item */
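        /*
         * E.g. (sketch): an skb with a linear header plus 6 fragments has
         * 7 vectors and therefore needs 2 work items.
         */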
2582         acked = 0;
2583         if (unlikely(wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2584                         vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
2585                 if ((u16) (*tcb->hw_consumer_index) !=
2586                     tcb->consumer_index &&
2587                     !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
2588                         acked = bnad_free_txbufs(bnad, tcb);
2589                         if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2590                                 bna_ib_ack(tcb->i_dbell, acked);
2591                         smp_mb__before_clear_bit();
2592                         clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
2593                 } else {
2594                         netif_stop_queue(netdev);
2595                         BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2596                 }
2597
2598                 smp_mb();
2599                 /*
2600                  * Check again to deal with the race between
2601                  * netif_stop_queue() here and netif_wake_queue() in the
2602                  * interrupt handler, which is not inside the netif tx lock.
2603                  */
2604                 if (likely
2605                     (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2606                      vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
2607                         BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2608                         return NETDEV_TX_BUSY;
2609                 } else {
2610                         netif_wake_queue(netdev);
2611                         BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
2612                 }
2613         }
2614
2615         unmap_prod = unmap_q->producer_index;
2616         flags = 0;
2617
2618         txq_prod = tcb->producer_index;
2619         BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt, txqent, wi_range);
2620         txqent->hdr.wi.reserved = 0;
2621         txqent->hdr.wi.num_vectors = vectors;
2622
2623         if (vlan_tx_tag_present(skb)) {
2624                 vlan_tag = (u16) vlan_tx_tag_get(skb);
2625                 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2626         }
2627         if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
2628                 vlan_tag =
2629                         (tcb->priority & 0x7) << 13 | (vlan_tag & 0x1fff);
2630                 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2631         }
2632
2633         txqent->hdr.wi.vlan_tag = htons(vlan_tag);
2634
2635         if (skb_is_gso(skb)) {
2636                 gso_size = skb_shinfo(skb)->gso_size;
2637
2638                 if (unlikely(gso_size > netdev->mtu)) {
2639                         dev_kfree_skb(skb);
2640                         BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
2641                         return NETDEV_TX_OK;
2642                 }
2643                 if (unlikely((gso_size + skb_transport_offset(skb) +
2644                         tcp_hdrlen(skb)) >= skb->len)) {
2645                         txqent->hdr.wi.opcode =
2646                                 __constant_htons(BNA_TXQ_WI_SEND);
2647                         txqent->hdr.wi.lso_mss = 0;
2648                         BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
2649                 } else {
2650                         txqent->hdr.wi.opcode =
2651                                 __constant_htons(BNA_TXQ_WI_SEND_LSO);
2652                         txqent->hdr.wi.lso_mss = htons(gso_size);
2653                 }
2654
2655                 err = bnad_tso_prepare(bnad, skb);
2656                 if (unlikely(err)) {
2657                         dev_kfree_skb(skb);
2658                         BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare);
2659                         return NETDEV_TX_OK;
2660                 }
2661                 flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
2662                 txqent->hdr.wi.l4_hdr_size_n_offset =
2663                         htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2664                               (tcp_hdrlen(skb) >> 2,
2665                                skb_transport_offset(skb)));
2666         } else {
2667                 txqent->hdr.wi.opcode = __constant_htons(BNA_TXQ_WI_SEND);
2668                 txqent->hdr.wi.lso_mss = 0;
2669
2670                 if (unlikely(skb->len > (netdev->mtu + ETH_HLEN))) {
2671                         dev_kfree_skb(skb);
2672                         BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long);
2673                         return NETDEV_TX_OK;
2674                 }
2675
2676                 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2677                         u8 proto = 0;
2678
2679                         if (skb->protocol == __constant_htons(ETH_P_IP))
2680                                 proto = ip_hdr(skb)->protocol;
2681                         else if (skb->protocol ==
2682                                  __constant_htons(ETH_P_IPV6)) {
2683                                 /* nexthdr may not be TCP if extension headers are present. */
2684                                 proto = ipv6_hdr(skb)->nexthdr;
2685                         }
2686                         if (proto == IPPROTO_TCP) {
2687                                 flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
2688                                 txqent->hdr.wi.l4_hdr_size_n_offset =
2689                                         htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2690                                               (0, skb_transport_offset(skb)));
2691
2692                                 BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
2693
2694                                 if (unlikely(skb_headlen(skb) <
2695                                 skb_transport_offset(skb) + tcp_hdrlen(skb))) {
2696                                         dev_kfree_skb(skb);
2697                                         BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
2698                                         return NETDEV_TX_OK;
2699                                 }
2700
2701                         } else if (proto == IPPROTO_UDP) {
2702                                 flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
2703                                 txqent->hdr.wi.l4_hdr_size_n_offset =
2704                                         htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2705                                               (0, skb_transport_offset(skb)));
2706
2707                                 BNAD_UPDATE_CTR(bnad, udpcsum_offload);
2708                                 if (unlikely(skb_headlen(skb) <
2709                                     skb_transport_offset(skb) +
2710                                     sizeof(struct udphdr))) {
2711                                         dev_kfree_skb(skb);
2712                                         BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr);
2713                                         return NETDEV_TX_OK;
2714                                 }
2715                         } else {
2716                                 dev_kfree_skb(skb);
2717                                 BNAD_UPDATE_CTR(bnad, tx_skb_csum_err);
2718                                 return NETDEV_TX_OK;
2719                         }
2720                 } else {
2721                         txqent->hdr.wi.l4_hdr_size_n_offset = 0;
2722                 }
2723         }
2724
2725         txqent->hdr.wi.flags = htons(flags);
2726
2727         txqent->hdr.wi.frame_length = htonl(skb->len);
2728
2729         unmap_q->unmap_array[unmap_prod].skb = skb;
2730         len = skb_headlen(skb);
2731         txqent->vector[0].length = htons(len);
2732         dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
2733                                   skb_headlen(skb), DMA_TO_DEVICE);
2734         dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
2735                            dma_addr);
2736
2737         BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
2738         BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
2739
2740         vect_id = 0;
2741         wis_used = 1;
2742
2743         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2744                 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
2745                 u16             size = skb_frag_size(frag);
2746
2747                 if (unlikely(size == 0)) {
2748                         unmap_prod = unmap_q->producer_index;
2749
2750                         unmap_prod = bnad_pci_unmap_skb(&bnad->pcidev->dev,
2751                                            unmap_q->unmap_array,
2752                                            unmap_prod, unmap_q->q_depth, skb,
2753                                            i);
2754                         dev_kfree_skb(skb);
2755                         BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
2756                         return NETDEV_TX_OK;
2757                 }
2758
2759                 len += size;
2760
2761                 if (++vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
2762                         vect_id = 0;
2763                         if (--wi_range)
2764                                 txqent++;
2765                         else {
2766                                 BNA_QE_INDX_ADD(txq_prod, wis_used,
2767                                                 tcb->q_depth);
2768                                 wis_used = 0;
2769                                 BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt,
2770                                                      txqent, wi_range);
2771                         }
2772                         wis_used++;
2773                         txqent->hdr.wi_ext.opcode =
2774                                 __constant_htons(BNA_TXQ_WI_EXTENSION);
2775                 }
2776
2777                 BUG_ON(size > BFI_TX_MAX_DATA_PER_VECTOR);
2778                 txqent->vector[vect_id].length = htons(size);
2779                 dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
2780                                             0, size, DMA_TO_DEVICE);
2781                 dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
2782                                    dma_addr);
2783                 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
2784                 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
2785         }
2786
2787         if (unlikely(len != skb->len)) {
2788                 unmap_prod = unmap_q->producer_index;
2789
2790                 unmap_prod = bnad_pci_unmap_skb(&bnad->pcidev->dev,
2791                                 unmap_q->unmap_array, unmap_prod,
2792                                 unmap_q->q_depth, skb,
2793                                 skb_shinfo(skb)->nr_frags);
2794                 dev_kfree_skb(skb);
2795                 BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
2796                 return NETDEV_TX_OK;
2797         }
2798
2799         unmap_q->producer_index = unmap_prod;
2800         BNA_QE_INDX_ADD(txq_prod, wis_used, tcb->q_depth);
2801         tcb->producer_index = txq_prod;
2802
2803         smp_mb();
2804
2805         if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2806                 return NETDEV_TX_OK;
2807
2808         bna_txq_prod_indx_doorbell(tcb);
2809         smp_mb();
2810
2811         if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index)
2812                 tasklet_schedule(&bnad->tx_free_tasklet);
2813
2814         return NETDEV_TX_OK;
2815 }
2816
2817 /*
2818  * Uses spin_lock to synchronize reading of the stats structures, which
2819  * are written by BNA under the same lock.
2820  */
2821 static struct rtnl_link_stats64 *
2822 bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
2823 {
2824         struct bnad *bnad = netdev_priv(netdev);
2825         unsigned long flags;
2826
2827         spin_lock_irqsave(&bnad->bna_lock, flags);
2828
2829         bnad_netdev_qstats_fill(bnad, stats);
2830         bnad_netdev_hwstats_fill(bnad, stats);
2831
2832         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2833
2834         return stats;
2835 }
2836
2837 void
2838 bnad_set_rx_mode(struct net_device *netdev)
2839 {
2840         struct bnad *bnad = netdev_priv(netdev);
2841         u32     new_mask, valid_mask;
2842         unsigned long flags;
2843
2844         spin_lock_irqsave(&bnad->bna_lock, flags);
2845
2846         new_mask = valid_mask = 0;
2847
2848         if (netdev->flags & IFF_PROMISC) {
2849                 if (!(bnad->cfg_flags & BNAD_CF_PROMISC)) {
2850                         new_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2851                         valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2852                         bnad->cfg_flags |= BNAD_CF_PROMISC;
2853                 }
2854         } else {
2855                 if (bnad->cfg_flags & BNAD_CF_PROMISC) {
2856                         new_mask = ~BNAD_RXMODE_PROMISC_DEFAULT;
2857                         valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2858                         bnad->cfg_flags &= ~BNAD_CF_PROMISC;
2859                 }
2860         }
2861
2862         if (netdev->flags & IFF_ALLMULTI) {
2863                 if (!(bnad->cfg_flags & BNAD_CF_ALLMULTI)) {
2864                         new_mask |= BNA_RXMODE_ALLMULTI;
2865                         valid_mask |= BNA_RXMODE_ALLMULTI;
2866                         bnad->cfg_flags |= BNAD_CF_ALLMULTI;
2867                 }
2868         } else {
2869                 if (bnad->cfg_flags & BNAD_CF_ALLMULTI) {
2870                         new_mask &= ~BNA_RXMODE_ALLMULTI;
2871                         valid_mask |= BNA_RXMODE_ALLMULTI;
2872                         bnad->cfg_flags &= ~BNAD_CF_ALLMULTI;
2873                 }
2874         }
2875
2876         if (bnad->rx_info[0].rx == NULL)
2877                 goto unlock;
2878
2879         bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL);
2880
2881         if (!netdev_mc_empty(netdev)) {
2882                 u8 *mcaddr_list;
2883                 int mc_count = netdev_mc_count(netdev);
2884
2885                 /* Index 0 holds the broadcast address */
2886                 mcaddr_list =
2887                         kzalloc((mc_count + 1) * ETH_ALEN,
2888                                 GFP_ATOMIC);
2889                 if (!mcaddr_list)
2890                         goto unlock;
2891
2892                 memcpy(&mcaddr_list[0], &bnad_bcast_addr[0], ETH_ALEN);
2893
2894                 /* Copy rest of the MC addresses */
2895                 bnad_netdev_mc_list_get(netdev, mcaddr_list);
2896
2897                 bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
2898                                         mcaddr_list, NULL);
2899
2900                 /* Should we enable BNAD_CF_ALLMULTI for err != 0 ? */
2901                 kfree(mcaddr_list);
2902         }
2903 unlock:
2904         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2905 }
2906
2907 /*
2908  * bna_lock is used to sync writes to netdev->addr
2909  * conf_lock cannot be used since this call may be made
2910  * in a non-blocking context.
2911  */
2912 static int
2913 bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
2914 {
2915         int err;
2916         struct bnad *bnad = netdev_priv(netdev);
2917         struct sockaddr *sa = (struct sockaddr *)mac_addr;
2918         unsigned long flags;
2919
2920         spin_lock_irqsave(&bnad->bna_lock, flags);
2921
2922         err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
2923
2924         if (!err)
2925                 memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);
2926
2927         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2928
2929         return err;
2930 }
2931
2932 static int
2933 bnad_mtu_set(struct bnad *bnad, int mtu)
2934 {
2935         unsigned long flags;
2936
2937         init_completion(&bnad->bnad_completions.mtu_comp);
2938
2939         spin_lock_irqsave(&bnad->bna_lock, flags);
2940         bna_enet_mtu_set(&bnad->bna.enet, mtu, bnad_cb_enet_mtu_set);
2941         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2942
2943         wait_for_completion(&bnad->bnad_completions.mtu_comp);
2944
2945         return bnad->bnad_completions.mtu_comp_status;
2946 }
2947
2948 static int
2949 bnad_change_mtu(struct net_device *netdev, int new_mtu)
2950 {
2951         int err, mtu = netdev->mtu;
2952         struct bnad *bnad = netdev_priv(netdev);
2953
2954         if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
2955                 return -EINVAL;
2956
2957         mutex_lock(&bnad->conf_mutex);
2958
2959         netdev->mtu = new_mtu;
2960
2961         mtu = ETH_HLEN + VLAN_HLEN + new_mtu + ETH_FCS_LEN;
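        /*
         * E.g. for the common new_mtu of 1500 this yields
         * 14 (ETH_HLEN) + 4 (VLAN_HLEN) + 1500 + 4 (ETH_FCS_LEN) = 1522.
         */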
2962         err = bnad_mtu_set(bnad, mtu);
2963         if (err)
2964                 err = -EBUSY;
2965
2966         mutex_unlock(&bnad->conf_mutex);
2967         return err;
2968 }
2969
2970 static void
2971 bnad_vlan_rx_add_vid(struct net_device *netdev,
2972                                  unsigned short vid)
2973 {
2974         struct bnad *bnad = netdev_priv(netdev);
2975         unsigned long flags;
2976
2977         if (!bnad->rx_info[0].rx)
2978                 return;
2979
2980         mutex_lock(&bnad->conf_mutex);
2981
2982         spin_lock_irqsave(&bnad->bna_lock, flags);
2983         bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
2984         set_bit(vid, bnad->active_vlans);
2985         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2986
2987         mutex_unlock(&bnad->conf_mutex);
2988 }
2989
2990 static void
2991 bnad_vlan_rx_kill_vid(struct net_device *netdev,
2992                                   unsigned short vid)
2993 {
2994         struct bnad *bnad = netdev_priv(netdev);
2995         unsigned long flags;
2996
2997         if (!bnad->rx_info[0].rx)
2998                 return;
2999
3000         mutex_lock(&bnad->conf_mutex);
3001
3002         spin_lock_irqsave(&bnad->bna_lock, flags);
3003         clear_bit(vid, bnad->active_vlans);
3004         bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
3005         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3006
3007         mutex_unlock(&bnad->conf_mutex);
3008 }
3009
3010 #ifdef CONFIG_NET_POLL_CONTROLLER
3011 static void
3012 bnad_netpoll(struct net_device *netdev)
3013 {
3014         struct bnad *bnad = netdev_priv(netdev);
3015         struct bnad_rx_info *rx_info;
3016         struct bnad_rx_ctrl *rx_ctrl;
3017         u32 curr_mask;
3018         int i, j;
3019
3020         if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
3021                 bna_intx_disable(&bnad->bna, curr_mask);
3022                 bnad_isr(bnad->pcidev->irq, netdev);
3023                 bna_intx_enable(&bnad->bna, curr_mask);
3024         } else {
3025                 /*
3026                  * Tx processing may happen in sending context, so no need
3027                  * to explicitly process completions here
3028                  */
3029
3030                 /* Rx processing */
3031                 for (i = 0; i < bnad->num_rx; i++) {
3032                         rx_info = &bnad->rx_info[i];
3033                         if (!rx_info->rx)
3034                                 continue;
3035                         for (j = 0; j < bnad->num_rxp_per_rx; j++) {
3036                                 rx_ctrl = &rx_info->rx_ctrl[j];
3037                                 if (rx_ctrl->ccb)
3038                                         bnad_netif_rx_schedule_poll(bnad,
3039                                                             rx_ctrl->ccb);
3040                         }
3041                 }
3042         }
3043 }
3044 #endif
3045
3046 static const struct net_device_ops bnad_netdev_ops = {
3047         .ndo_open               = bnad_open,
3048         .ndo_stop               = bnad_stop,
3049         .ndo_start_xmit         = bnad_start_xmit,
3050         .ndo_get_stats64        = bnad_get_stats64,
3051         .ndo_set_rx_mode        = bnad_set_rx_mode,
3052         .ndo_validate_addr      = eth_validate_addr,
3053         .ndo_set_mac_address    = bnad_set_mac_address,
3054         .ndo_change_mtu         = bnad_change_mtu,
3055         .ndo_vlan_rx_add_vid    = bnad_vlan_rx_add_vid,
3056         .ndo_vlan_rx_kill_vid   = bnad_vlan_rx_kill_vid,
3057 #ifdef CONFIG_NET_POLL_CONTROLLER
3058         .ndo_poll_controller    = bnad_netpoll
3059 #endif
3060 };
3061
3062 static void
3063 bnad_netdev_init(struct bnad *bnad, bool using_dac)
3064 {
3065         struct net_device *netdev = bnad->netdev;
3066
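             /* Features that may be toggled via ethtool; also enabled by default below */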
3067         netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
3068                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3069                 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_TX;
3070
3071         netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
3072                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3073                 NETIF_F_TSO | NETIF_F_TSO6;
3074
3075         netdev->features |= netdev->hw_features |
3076                 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
3077
3078         if (using_dac)
3079                 netdev->features |= NETIF_F_HIGHDMA;
3080
3081         netdev->mem_start = bnad->mmio_start;
3082         netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
3083
3084         netdev->netdev_ops = &bnad_netdev_ops;
3085         bnad_set_ethtool_ops(netdev);
3086 }
3087
3088 /*
3089  * 1. Initialize the bnad structure
3090  * 2. Setup netdev pointer in pci_dev
3091  * 3. Initialize Tx free tasklet
3092  * 4. Initialize no. of TxQ & CQs & MSIX vectors
3093  */
3094 static int
3095 bnad_init(struct bnad *bnad,
3096           struct pci_dev *pdev, struct net_device *netdev)
3097 {
3098         unsigned long flags;
3099
3100         SET_NETDEV_DEV(netdev, &pdev->dev);
3101         pci_set_drvdata(pdev, netdev);
3102
3103         bnad->netdev = netdev;
3104         bnad->pcidev = pdev;
3105         bnad->mmio_start = pci_resource_start(pdev, 0);
3106         bnad->mmio_len = pci_resource_len(pdev, 0);
3107         bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
3108         if (!bnad->bar0) {
3109                 dev_err(&pdev->dev, "ioremap for bar0 failed\n");
3110                 pci_set_drvdata(pdev, NULL);
3111                 return -ENOMEM;
3112         }
3113         pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
3114                (unsigned long long) bnad->mmio_len);
3115
3116         spin_lock_irqsave(&bnad->bna_lock, flags);
3117         if (!bnad_msix_disable)
3118                 bnad->cfg_flags = BNAD_CF_MSIX;
3119
3120         bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
3121
3122         bnad_q_num_init(bnad);
3123         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3124
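             /* One MSI-X vector per TxQ and per Rx path, plus the mailbox vector(s) */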
3125         bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
3126                 (bnad->num_rx * bnad->num_rxp_per_rx) +
3127                          BNAD_MAILBOX_MSIX_VECTORS;
3128
3129         bnad->txq_depth = BNAD_TXQ_DEPTH;
3130         bnad->rxq_depth = BNAD_RXQ_DEPTH;
3131
3132         bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
3133         bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
3134
3135         tasklet_init(&bnad->tx_free_tasklet, bnad_tx_free_tasklet,
3136                      (unsigned long)bnad);
3137
3138         return 0;
3139 }
3140
3141 /*
3142  * Must be called after bnad_pci_uninit()
3143  * so that iounmap() and pci_set_drvdata(NULL)
3144  * happen only after PCI uninitialization.
3145  */
3146 static void
3147 bnad_uninit(struct bnad *bnad)
3148 {
3149         if (bnad->bar0)
3150                 iounmap(bnad->bar0);
3151         pci_set_drvdata(bnad->pcidev, NULL);
3152 }
3153
3154 /*
3155  * Initialize locks
3156  *     a) Per-ioceth mutex used for serializing configuration
3157  *        changes from the OS interface
3158  *     b) Spinlock used to protect the bna state machine
3159  */
3160 static void
3161 bnad_lock_init(struct bnad *bnad)
3162 {
3163         spin_lock_init(&bnad->bna_lock);
3164         mutex_init(&bnad->conf_mutex);
3165 }
3166
3167 static void
3168 bnad_lock_uninit(struct bnad *bnad)
3169 {
3170         mutex_destroy(&bnad->conf_mutex);
3171 }
3172
3173 /* PCI Initialization */
3174 static int
3175 bnad_pci_init(struct bnad *bnad,
3176               struct pci_dev *pdev, bool *using_dac)
3177 {
3178         int err;
3179
3180         err = pci_enable_device(pdev);
3181         if (err)
3182                 return err;
3183         err = pci_request_regions(pdev, BNAD_NAME);
3184         if (err)
3185                 goto disable_device;
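             /* Prefer a 64-bit DMA mask; fall back to 32-bit if that fails */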
3186         if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
3187             !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
3188                 *using_dac = 1;
3189         } else {
3190                 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3191                 if (err) {
3192                         err = dma_set_coherent_mask(&pdev->dev,
3193                                                     DMA_BIT_MASK(32));
3194                         if (err)
3195                                 goto release_regions;
3196                 }
3197                 *using_dac = 0;
3198         }
3199         pci_set_master(pdev);
3200         return 0;
3201
3202 release_regions:
3203         pci_release_regions(pdev);
3204 disable_device:
3205         pci_disable_device(pdev);
3206
3207         return err;
3208 }
3209
3210 static void
3211 bnad_pci_uninit(struct pci_dev *pdev)
3212 {
3213         pci_release_regions(pdev);
3214         pci_disable_device(pdev);
3215 }
3216
3217 static int __devinit
3218 bnad_pci_probe(struct pci_dev *pdev,
3219                 const struct pci_device_id *pcidev_id)
3220 {
3221         bool    using_dac;
3222         int     err;
3223         struct bnad *bnad;
3224         struct bna *bna;
3225         struct net_device *netdev;
3226         struct bfa_pcidev pcidev_info;
3227         unsigned long flags;
3228
3229         pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
3230                pdev, pcidev_id, PCI_FUNC(pdev->devfn));
3231
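             /* Serialize firmware image loading across probes */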
3232         mutex_lock(&bnad_fwimg_mutex);
3233         if (!cna_get_firmware_buf(pdev)) {
3234                 mutex_unlock(&bnad_fwimg_mutex);
3235                 pr_warn("Failed to load Firmware Image!\n");
3236                 return -ENODEV;
3237         }
3238         mutex_unlock(&bnad_fwimg_mutex);
3239
3240         /*
3241          * Allocates sizeof(struct net_device) + sizeof(struct bnad);
3242          * bnad is reachable via netdev_priv(netdev)
3243          */
3244         netdev = alloc_etherdev(sizeof(struct bnad));
3245         if (!netdev) {
3246                 dev_err(&pdev->dev, "netdev allocation failed\n");
3247                 err = -ENOMEM;
3248                 return err;
3249         }
3250         bnad = netdev_priv(netdev);
3251
3252         bnad_lock_init(bnad);
3253
3254         mutex_lock(&bnad->conf_mutex);
3255         /*
3256          * PCI initialization
3257          *      Output : using_dac = 1 for 64 bit DMA
3258          *                         = 0 for 32 bit DMA
3259          */
3260         err = bnad_pci_init(bnad, pdev, &using_dac);
3261         if (err)
3262                 goto unlock_mutex;
3263
3264         /*
3265          * Initialize bnad structure
3266          * Setup relation between pci_dev & netdev
3267          * Init Tx free tasklet
3268          */
3269         err = bnad_init(bnad, pdev, netdev);
3270         if (err)
3271                 goto pci_uninit;
3272
3273         /* Initialize netdev structure, set up ethtool ops */
3274         bnad_netdev_init(bnad, using_dac);
3275
3276         /* Set link to down state */
3277         netif_carrier_off(netdev);
3278
3279         /* Get resource requirement from bna */
3280         spin_lock_irqsave(&bnad->bna_lock, flags);
3281         bna_res_req(&bnad->res_info[0]);
3282         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3283
3284         /* Allocate resources from bna */
3285         err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3286         if (err)
3287                 goto drv_uninit;
3288
3289         bna = &bnad->bna;
3290
3291         /* Setup pcidev_info for bna_init() */
3292         pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
3293         pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
3294         pcidev_info.device_id = bnad->pcidev->device;
3295         pcidev_info.pci_bar_kva = bnad->bar0;
3296
3297         spin_lock_irqsave(&bnad->bna_lock, flags);
3298         bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
3299         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3300
3301         bnad->stats.bna_stats = &bna->stats;
3302
3303         bnad_enable_msix(bnad);
3304         err = bnad_mbox_irq_alloc(bnad);
3305         if (err)
3306                 goto res_free;
3307
3308
3309         /* Set up timers */
3310         setup_timer(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout,
3311                                 ((unsigned long)bnad));
3312         setup_timer(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check,
3313                                 ((unsigned long)bnad));
3314         setup_timer(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout,
3315                                 ((unsigned long)bnad));
3316         setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
3317                                 ((unsigned long)bnad));
3318
3319         /* Now start the timer before calling IOC */
3320         mod_timer(&bnad->bna.ioceth.ioc.iocpf_timer,
3321                   jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
3322
3323         /*
3324          * Start the chip
3325          * If the callback comes back with an error we skip the rest of
3326          * the setup; catastrophic, but the probe still returns 0.
3327          */
3328         err = bnad_ioceth_enable(bnad);
3329         if (err) {
3330                 pr_err("BNA: Initialization failed err=%d\n",
3331                        err);
3332                 goto probe_success;
3333         }
3334
3335         spin_lock_irqsave(&bnad->bna_lock, flags);
3336         if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3337                 bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) {
3338                 bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1,
3339                         bna_attr(bna)->num_rxp - 1);
3340                 if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3341                         bna_num_rxp_set(bna, BNAD_NUM_RXP + 1))
3342                         err = -EIO;
3343         }
3344         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3345         if (err)
3346                 goto disable_ioceth;
3347
3348         spin_lock_irqsave(&bnad->bna_lock, flags);
3349         bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]);
3350         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3351
3352         err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3353         if (err) {
3354                 err = -EIO;
3355                 goto disable_ioceth;
3356         }
3357
3358         spin_lock_irqsave(&bnad->bna_lock, flags);
3359         bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]);
3360         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3361
3362         /* Get the burnt-in mac */
3363         spin_lock_irqsave(&bnad->bna_lock, flags);
3364         bna_enet_perm_mac_get(&bna->enet, &bnad->perm_addr);
3365         bnad_set_netdev_perm_addr(bnad);
3366         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3367
3368         mutex_unlock(&bnad->conf_mutex);
3369
3370         /* Finally, register with the net_device layer */
3371         err = register_netdev(netdev);
3372         if (err) {
3373                 pr_err("BNA : Registering with netdev failed\n");
3374                 goto probe_uninit;
3375         }
3376         set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);
3377
3378         return 0;
3379
3380 probe_success:
3381         mutex_unlock(&bnad->conf_mutex);
3382         return 0;
3383
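     /* Error unwinding: release resources in reverse order of their setup */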
3384 probe_uninit:
3385         mutex_lock(&bnad->conf_mutex);
3386         bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3387 disable_ioceth:
3388         bnad_ioceth_disable(bnad);
3389         del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3390         del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3391         del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3392         spin_lock_irqsave(&bnad->bna_lock, flags);
3393         bna_uninit(bna);
3394         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3395         bnad_mbox_irq_free(bnad);
3396         bnad_disable_msix(bnad);
3397 res_free:
3398         bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3399 drv_uninit:
3400         bnad_uninit(bnad);
3401 pci_uninit:
3402         bnad_pci_uninit(pdev);
3403 unlock_mutex:
3404         mutex_unlock(&bnad->conf_mutex);
3405         bnad_lock_uninit(bnad);
3406         free_netdev(netdev);
3407         return err;
3408 }
3409
3410 static void __devexit
3411 bnad_pci_remove(struct pci_dev *pdev)
3412 {
3413         struct net_device *netdev = pci_get_drvdata(pdev);
3414         struct bnad *bnad;
3415         struct bna *bna;
3416         unsigned long flags;
3417
3418         if (!netdev)
3419                 return;
3420
3421         pr_info("%s bnad_pci_remove\n", netdev->name);
3422         bnad = netdev_priv(netdev);
3423         bna = &bnad->bna;
3424
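             /* Unregister from the network stack first so no new I/O is started */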
3425         if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags))
3426                 unregister_netdev(netdev);
3427
3428         mutex_lock(&bnad->conf_mutex);
3429         bnad_ioceth_disable(bnad);
3430         del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3431         del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3432         del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3433         spin_lock_irqsave(&bnad->bna_lock, flags);
3434         bna_uninit(bna);
3435         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3436
3437         bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3438         bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3439         bnad_mbox_irq_free(bnad);
3440         bnad_disable_msix(bnad);
3441         bnad_pci_uninit(pdev);
3442         mutex_unlock(&bnad->conf_mutex);
3443         bnad_lock_uninit(bnad);
3444         bnad_uninit(bnad);
3445         free_netdev(netdev);
3446 }
3447
3448 static DEFINE_PCI_DEVICE_TABLE(bnad_pci_id_table) = {
3449         {
3450                 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3451                         PCI_DEVICE_ID_BROCADE_CT),
3452                 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3453                 .class_mask =  0xffff00
3454         },
3455         {
3456                 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3457                         BFA_PCI_DEVICE_ID_CT2),
3458                 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3459                 .class_mask =  0xffff00
3460         },
3461         {0,  },
3462 };
3463
3464 MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
3465
3466 static struct pci_driver bnad_pci_driver = {
3467         .name = BNAD_NAME,
3468         .id_table = bnad_pci_id_table,
3469         .probe = bnad_pci_probe,
3470         .remove = __devexit_p(bnad_pci_remove),
3471 };
3472
3473 static int __init
3474 bnad_module_init(void)
3475 {
3476         int err;
3477
3478         pr_info("Brocade 10G Ethernet driver - version: %s\n",
3479                         BNAD_VERSION);
3480
3481         bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
3482
3483         err = pci_register_driver(&bnad_pci_driver);
3484         if (err < 0) {
3485                 pr_err("bna : PCI registration failed in module init "
3486                        "(%d)\n", err);
3487                 return err;
3488         }
3489
3490         return 0;
3491 }
3492
3493 static void __exit
3494 bnad_module_exit(void)
3495 {
3496         pci_unregister_driver(&bnad_pci_driver);
3497
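             /* Release the firmware image cached by cna_get_firmware_buf() at probe time */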
3498         if (bfi_fw)
3499                 release_firmware(bfi_fw);
3500 }
3501
3502 module_init(bnad_module_init);
3503 module_exit(bnad_module_exit);
3504
3505 MODULE_AUTHOR("Brocade");
3506 MODULE_LICENSE("GPL");
3507 MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
3508 MODULE_VERSION(BNAD_VERSION);
3509 MODULE_FIRMWARE(CNA_FW_FILE_CT);
3510 MODULE_FIRMWARE(CNA_FW_FILE_CT2);