2 * Linux network driver for Brocade Converged Network Adapter.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
18 #include <linux/bitops.h>
19 #include <linux/netdevice.h>
20 #include <linux/skbuff.h>
21 #include <linux/etherdevice.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_vlan.h>
25 #include <linux/if_ether.h>
27 #include <linux/prefetch.h>
28 #include <linux/module.h>
34 static DEFINE_MUTEX(bnad_fwimg_mutex);
39 static uint bnad_msix_disable;
40 module_param(bnad_msix_disable, uint, 0444);
41 MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");
43 static uint bnad_ioc_auto_recover = 1;
44 module_param(bnad_ioc_auto_recover, uint, 0444);
45 MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");
47 static uint bna_debugfs_enable = 1;
48 module_param(bna_debugfs_enable, uint, S_IRUGO | S_IWUSR);
49 MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1,"
50 " Range[false:0|true:1]");
55 static u32 bnad_rxqs_per_cq = 2;
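/* Two RxQs (small-buffer and large-buffer) feed each CQ in the default SLR packing; see bnad_init_rx_config(). */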
57 static struct mutex bnad_list_mutex;
58 static LIST_HEAD(bnad_list);
59 static const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
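/* Broadcast MAC: programmed as the default multicast entry (bnad_enable_default_bcast()) and kept at index 0 of the multicast list (see bnad_netdev_mc_list_get()). */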
64 #define BNAD_GET_MBOX_IRQ(_bnad) \
65 (((_bnad)->cfg_flags & BNAD_CF_MSIX) ? \
66 ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
67 ((_bnad)->pcidev->irq))
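/*
 * BNAD_GET_MBOX_IRQ resolves to the dedicated mailbox MSI-X vector when
 * MSI-X is enabled, and to the legacy PCI INTx line otherwise.
 */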
69 #define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _size) \
70 do { \
71 (_res_info)->res_type = BNA_RES_T_MEM; \
72 (_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA; \
73 (_res_info)->res_u.mem_info.num = (_num); \
74 (_res_info)->res_u.mem_info.len = (_size); \
75 } while (0)
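/*
 * BNAD_FILL_UNMAPQ_MEM_REQ describes a kernel-virtual (KVA) memory request of
 * _num blocks of _size bytes each; the request is later satisfied by
 * bnad_mem_alloc() via bnad_tx_res_alloc()/bnad_rx_res_alloc().
 */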
78 bnad_add_to_list(struct bnad *bnad)
80 mutex_lock(&bnad_list_mutex);
81 list_add_tail(&bnad->list_entry, &bnad_list);
83 mutex_unlock(&bnad_list_mutex);
87 bnad_remove_from_list(struct bnad *bnad)
89 mutex_lock(&bnad_list_mutex);
90 list_del(&bnad->list_entry);
91 mutex_unlock(&bnad_list_mutex);
95 * Reinitialize completions in CQ, once Rx is taken down
98 bnad_cq_cleanup(struct bnad *bnad, struct bna_ccb *ccb)
100 struct bna_cq_entry *cmpl;
103 for (i = 0; i < ccb->q_depth; i++) {
104 cmpl = &((struct bna_cq_entry *)ccb->sw_q)[i];
109 /* Tx Datapath functions */
112 /* Caller should ensure that the entry at unmap_q[index] is valid */
114 bnad_tx_buff_unmap(struct bnad *bnad,
115 struct bnad_tx_unmap *unmap_q,
116 u32 q_depth, u32 index)
118 struct bnad_tx_unmap *unmap;
122 unmap = &unmap_q[index];
123 nvecs = unmap->nvecs;
128 dma_unmap_single(&bnad->pcidev->dev,
129 dma_unmap_addr(&unmap->vectors[0], dma_addr),
130 skb_headlen(skb), DMA_TO_DEVICE);
131 dma_unmap_addr_set(&unmap->vectors[0], dma_addr, 0);
137 if (vector == BFI_TX_MAX_VECTORS_PER_WI) {
139 BNA_QE_INDX_INC(index, q_depth);
140 unmap = &unmap_q[index];
143 dma_unmap_page(&bnad->pcidev->dev,
144 dma_unmap_addr(&unmap->vectors[vector], dma_addr),
145 dma_unmap_len(&unmap->vectors[vector], dma_len),
147 dma_unmap_addr_set(&unmap->vectors[vector], dma_addr, 0);
151 BNA_QE_INDX_INC(index, q_depth);
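/* The advanced index (one past this frame's last vector) is handed back to the caller; bnad_txcmpl_process() continues reclaiming from there. */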
157 * Frees all pending Tx Bufs
158 * At this point no activity is expected on the Q,
159 * so DMA unmap & freeing is fine.
162 bnad_txq_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
164 struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
168 for (i = 0; i < tcb->q_depth; i++) {
169 skb = unmap_q[i].skb;
172 bnad_tx_buff_unmap(bnad, unmap_q, tcb->q_depth, i);
174 dev_kfree_skb_any(skb);
179 * bnad_txcmpl_process : Frees the Tx bufs on Tx completion
180 * Can be called in a) Interrupt context
181 *                  b) Sending context
184 bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb)
186 u32 sent_packets = 0, sent_bytes = 0;
187 u32 wis, unmap_wis, hw_cons, cons, q_depth;
188 struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
189 struct bnad_tx_unmap *unmap;
192 /* Just return if TX is stopped */
193 if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
196 hw_cons = *(tcb->hw_consumer_index);
197 cons = tcb->consumer_index;
198 q_depth = tcb->q_depth;
200 wis = BNA_Q_INDEX_CHANGE(cons, hw_cons, q_depth);
201 BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));
204 unmap = &unmap_q[cons];
209 sent_bytes += skb->len;
211 unmap_wis = BNA_TXQ_WI_NEEDED(unmap->nvecs);
214 cons = bnad_tx_buff_unmap(bnad, unmap_q, q_depth, cons);
215 dev_kfree_skb_any(skb);
218 /* Update consumer pointers. */
219 tcb->consumer_index = hw_cons;
221 tcb->txq->tx_packets += sent_packets;
222 tcb->txq->tx_bytes += sent_bytes;
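/* The number of reclaimed packets is reported to bnad_tx_complete(), which uses it to ack the IB doorbell and decide whether to wake the netif queue. */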
228 bnad_tx_complete(struct bnad *bnad, struct bna_tcb *tcb)
230 struct net_device *netdev = bnad->netdev;
233 if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
236 sent = bnad_txcmpl_process(bnad, tcb);
238 if (netif_queue_stopped(netdev) &&
239 netif_carrier_ok(netdev) &&
240 BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
241 BNAD_NETIF_WAKE_THRESHOLD) {
242 if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
243 netif_wake_queue(netdev);
244 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
249 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
250 bna_ib_ack(tcb->i_dbell, sent);
252 smp_mb__before_clear_bit();
253 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
258 /* MSIX Tx Completion Handler */
260 bnad_msix_tx(int irq, void *data)
262 struct bna_tcb *tcb = (struct bna_tcb *)data;
263 struct bnad *bnad = tcb->bnad;
265 bnad_tx_complete(bnad, tcb);
271 bnad_rxq_alloc_uninit(struct bnad *bnad, struct bna_rcb *rcb)
273 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
275 unmap_q->reuse_pi = -1;
276 unmap_q->alloc_order = -1;
277 unmap_q->map_size = 0;
278 unmap_q->type = BNAD_RXBUF_NONE;
281 /* Default is page-based allocation. Multi-buffer support - TBD */
283 bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb)
285 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
288 bnad_rxq_alloc_uninit(bnad, rcb);
290 order = get_order(rcb->rxq->buffer_size);
292 unmap_q->type = BNAD_RXBUF_PAGE;
294 if (bna_is_small_rxq(rcb->id)) {
295 unmap_q->alloc_order = 0;
296 unmap_q->map_size = rcb->rxq->buffer_size;
298 if (rcb->rxq->multi_buffer) {
299 unmap_q->alloc_order = 0;
300 unmap_q->map_size = rcb->rxq->buffer_size;
301 unmap_q->type = BNAD_RXBUF_MULTI_BUFF;
303 unmap_q->alloc_order = order;
304 unmap_q->map_size =
305 (rcb->rxq->buffer_size > 2048) ?
306 PAGE_SIZE << order : 2048;
310 BUG_ON(((PAGE_SIZE << order) % unmap_q->map_size));
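/*
 * Page-carving scheme: each compound page of alloc_order is sliced into
 * map_size chunks, so map_size must divide PAGE_SIZE << order evenly;
 * reuse_pi remembers the producer slot whose page still has spare room
 * (see bnad_rxq_refill_page()).
 */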
316 bnad_rxq_cleanup_page(struct bnad *bnad, struct bnad_rx_unmap *unmap)
321 dma_unmap_page(&bnad->pcidev->dev,
322 dma_unmap_addr(&unmap->vector, dma_addr),
323 unmap->vector.len, DMA_FROM_DEVICE);
324 put_page(unmap->page);
326 dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
327 unmap->vector.len = 0;
331 bnad_rxq_cleanup_skb(struct bnad *bnad, struct bnad_rx_unmap *unmap)
336 dma_unmap_single(&bnad->pcidev->dev,
337 dma_unmap_addr(&unmap->vector, dma_addr),
338 unmap->vector.len, DMA_FROM_DEVICE);
339 dev_kfree_skb_any(unmap->skb);
341 dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
342 unmap->vector.len = 0;
346 bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
348 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
351 for (i = 0; i < rcb->q_depth; i++) {
352 struct bnad_rx_unmap *unmap = &unmap_q->unmap[i];
354 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
355 bnad_rxq_cleanup_skb(bnad, unmap);
357 bnad_rxq_cleanup_page(bnad, unmap);
359 bnad_rxq_alloc_uninit(bnad, rcb);
363 bnad_rxq_refill_page(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
365 u32 alloced, prod, q_depth;
366 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
367 struct bnad_rx_unmap *unmap, *prev;
368 struct bna_rxq_entry *rxent;
370 u32 page_offset, alloc_size;
373 prod = rcb->producer_index;
374 q_depth = rcb->q_depth;
376 alloc_size = PAGE_SIZE << unmap_q->alloc_order;
380 unmap = &unmap_q->unmap[prod];
382 if (unmap_q->reuse_pi < 0) {
383 page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
384 unmap_q->alloc_order);
387 prev = &unmap_q->unmap[unmap_q->reuse_pi];
389 page_offset = prev->page_offset + unmap_q->map_size;
393 if (unlikely(!page)) {
394 BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
395 rcb->rxq->rxbuf_alloc_failed++;
399 dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset,
400 unmap_q->map_size, DMA_FROM_DEVICE);
403 unmap->page_offset = page_offset;
404 dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
405 unmap->vector.len = unmap_q->map_size;
406 page_offset += unmap_q->map_size;
408 if (page_offset < alloc_size)
409 unmap_q->reuse_pi = prod;
411 unmap_q->reuse_pi = -1;
413 rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
414 BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
415 BNA_QE_INDX_INC(prod, q_depth);
420 if (likely(alloced)) {
421 rcb->producer_index = prod;
423 if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
424 bna_rxq_prod_indx_doorbell(rcb);
431 bnad_rxq_refill_skb(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
433 u32 alloced, prod, q_depth, buff_sz;
434 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
435 struct bnad_rx_unmap *unmap;
436 struct bna_rxq_entry *rxent;
440 buff_sz = rcb->rxq->buffer_size;
441 prod = rcb->producer_index;
442 q_depth = rcb->q_depth;
446 unmap = &unmap_q->unmap[prod];
448 skb = netdev_alloc_skb_ip_align(bnad->netdev, buff_sz);
450 if (unlikely(!skb)) {
451 BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
452 rcb->rxq->rxbuf_alloc_failed++;
455 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
456 buff_sz, DMA_FROM_DEVICE);
459 dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
460 unmap->vector.len = buff_sz;
462 rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
463 BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
464 BNA_QE_INDX_INC(prod, q_depth);
469 if (likely(alloced)) {
470 rcb->producer_index = prod;
472 if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
473 bna_rxq_prod_indx_doorbell(rcb);
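/*
 * Post fresh Rx buffers. Refill is deferred until at least
 * 2^BNAD_RXQ_REFILL_THRESHOLD_SHIFT descriptors are free, so producer
 * doorbell writes are batched instead of issued per packet.
 */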
480 bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
482 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
485 to_alloc = BNA_QE_FREE_CNT(rcb, rcb->q_depth);
486 if (!(to_alloc >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT))
489 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
490 bnad_rxq_refill_skb(bnad, rcb, to_alloc);
492 bnad_rxq_refill_page(bnad, rcb, to_alloc);
495 #define flags_cksum_prot_mask (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
497 BNA_CQ_EF_TCP | BNA_CQ_EF_UDP | \
498 BNA_CQ_EF_L4_CKSUM_OK)
500 #define flags_tcp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
501 BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
502 #define flags_tcp6 (BNA_CQ_EF_IPV6 | \
503 BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
504 #define flags_udp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
505 BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
506 #define flags_udp6 (BNA_CQ_EF_IPV6 | \
507 BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
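/*
 * The flags_* masks isolate the protocol and checksum bits of a completion;
 * bnad_cq_process() marks an skb CHECKSUM_UNNECESSARY only when the masked
 * flags match one of the fully validated TCP/UDP over IPv4/IPv6 patterns.
 */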
510 bnad_cq_drop_packet(struct bnad *bnad, struct bna_rcb *rcb,
511 u32 sop_ci, u32 nvecs)
513 struct bnad_rx_unmap_q *unmap_q;
514 struct bnad_rx_unmap *unmap;
517 unmap_q = rcb->unmap_q;
518 for (vec = 0, ci = sop_ci; vec < nvecs; vec++) {
519 unmap = &unmap_q->unmap[ci];
520 BNA_QE_INDX_INC(ci, rcb->q_depth);
522 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
523 bnad_rxq_cleanup_skb(bnad, unmap);
525 bnad_rxq_cleanup_page(bnad, unmap);
530 bnad_cq_setup_skb_frags(struct bna_rcb *rcb, struct sk_buff *skb,
531 u32 sop_ci, u32 nvecs, u32 last_fraglen)
534 u32 ci, vec, len, totlen = 0;
535 struct bnad_rx_unmap_q *unmap_q;
536 struct bnad_rx_unmap *unmap;
538 unmap_q = rcb->unmap_q;
541 /* prefetch header */
542 prefetch(page_address(unmap_q->unmap[sop_ci].page) +
543 unmap_q->unmap[sop_ci].page_offset);
545 for (vec = 1, ci = sop_ci; vec <= nvecs; vec++) {
546 unmap = &unmap_q->unmap[ci];
547 BNA_QE_INDX_INC(ci, rcb->q_depth);
549 dma_unmap_page(&bnad->pcidev->dev,
550 dma_unmap_addr(&unmap->vector, dma_addr),
551 unmap->vector.len, DMA_FROM_DEVICE);
553 len = (vec == nvecs) ?
554 last_fraglen : unmap->vector.len;
557 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
558 unmap->page, unmap->page_offset, len);
561 unmap->vector.len = 0;
564 skb->len += totlen;
565 skb->data_len += totlen;
566 skb->truesize += totlen;
570 bnad_cq_setup_skb(struct bnad *bnad, struct sk_buff *skb,
571 struct bnad_rx_unmap *unmap, u32 len)
575 dma_unmap_single(&bnad->pcidev->dev,
576 dma_unmap_addr(&unmap->vector, dma_addr),
577 unmap->vector.len, DMA_FROM_DEVICE);
580 skb->protocol = eth_type_trans(skb, bnad->netdev);
583 unmap->vector.len = 0;
587 bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
589 struct bna_cq_entry *cq, *cmpl, *next_cmpl;
590 struct bna_rcb *rcb = NULL;
591 struct bnad_rx_unmap_q *unmap_q;
592 struct bnad_rx_unmap *unmap = NULL;
593 struct sk_buff *skb = NULL;
594 struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
595 struct bnad_rx_ctrl *rx_ctrl = ccb->ctrl;
596 u32 packets = 0, len = 0, totlen = 0;
597 u32 pi, vec, sop_ci = 0, nvecs = 0;
598 u32 flags, masked_flags;
600 prefetch(bnad->netdev);
603 cmpl = &cq[ccb->producer_index];
605 while (packets < budget) {
608 /* The 'valid' field is set by the adapter, only after writing
609 * the other fields of completion entry. Hence, do not load
610 * other fields of completion entry *before* the 'valid' is
611 * loaded. Adding the rmb() here prevents the compiler and/or
612 * CPU from reordering the reads which would potentially result
613 * in reading stale values in completion entry.
614 */
615 rmb();
617 BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
619 if (bna_is_small_rxq(cmpl->rxq_id))
624 unmap_q = rcb->unmap_q;
626 /* start of packet ci */
627 sop_ci = rcb->consumer_index;
629 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) {
630 unmap = &unmap_q->unmap[sop_ci];
633 skb = napi_get_frags(&rx_ctrl->napi);
639 flags = ntohl(cmpl->flags);
640 len = ntohs(cmpl->length);
644 /* Check all the completions for this frame.
645 * busy-wait doesn't help much, break here.
647 if (BNAD_RXBUF_IS_MULTI_BUFF(unmap_q->type) &&
648 (flags & BNA_CQ_EF_EOP) == 0) {
649 pi = ccb->producer_index;
651 BNA_QE_INDX_INC(pi, ccb->q_depth);
654 if (!next_cmpl->valid)
656 /* The 'valid' field is set by the adapter, only
657 * after writing the other fields of completion
658 * entry. Hence, do not load other fields of
659 * completion entry *before* the 'valid' is
660 * loaded. Adding the rmb() here prevents the
661 * compiler and/or CPU from reordering the reads
662 * which would potentially result in reading
663 * stale values in completion entry.
664 */
665 rmb();
667 len = ntohs(next_cmpl->length);
668 flags = ntohl(next_cmpl->flags);
672 } while ((flags & BNA_CQ_EF_EOP) == 0);
674 if (!next_cmpl->valid)
678 /* TODO: BNA_CQ_EF_LOCAL ? */
679 if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
680 BNA_CQ_EF_FCS_ERROR |
681 BNA_CQ_EF_TOO_LONG))) {
682 bnad_cq_drop_packet(bnad, rcb, sop_ci, nvecs);
683 rcb->rxq->rx_packets_with_error++;
688 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
689 bnad_cq_setup_skb(bnad, skb, unmap, len);
691 bnad_cq_setup_skb_frags(rcb, skb, sop_ci, nvecs, len);
694 rcb->rxq->rx_packets++;
695 rcb->rxq->rx_bytes += totlen;
696 ccb->bytes_per_intr += totlen;
698 masked_flags = flags & flags_cksum_prot_mask;
701 ((bnad->netdev->features & NETIF_F_RXCSUM) &&
702 ((masked_flags == flags_tcp4) ||
703 (masked_flags == flags_udp4) ||
704 (masked_flags == flags_tcp6) ||
705 (masked_flags == flags_udp6))))
706 skb->ip_summed = CHECKSUM_UNNECESSARY;
707 else
708 skb_checksum_none_assert(skb);
710 if ((flags & BNA_CQ_EF_VLAN) &&
711 (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
712 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag));
714 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
715 netif_receive_skb(skb);
717 napi_gro_frags(&rx_ctrl->napi);
720 BNA_QE_INDX_ADD(rcb->consumer_index, nvecs, rcb->q_depth);
721 for (vec = 0; vec < nvecs; vec++) {
722 cmpl = &cq[ccb->producer_index];
724 BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth);
726 cmpl = &cq[ccb->producer_index];
729 napi_gro_flush(&rx_ctrl->napi, false);
730 if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
731 bna_ib_ack_disable_irq(ccb->i_dbell, packets);
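/* Completions are acked with the IB interrupt left disabled; bnad_napi_poll_rx() re-enables it via bnad_enable_rx_irq_unsafe() once the poll finishes under budget. */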
733 bnad_rxq_post(bnad, ccb->rcb[0]);
735 bnad_rxq_post(bnad, ccb->rcb[1]);
741 bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
743 struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
744 struct napi_struct *napi = &rx_ctrl->napi;
746 if (likely(napi_schedule_prep(napi))) {
747 __napi_schedule(napi);
748 rx_ctrl->rx_schedule++;
752 /* MSIX Rx Path Handler */
754 bnad_msix_rx(int irq, void *data)
756 struct bna_ccb *ccb = (struct bna_ccb *)data;
759 ((struct bnad_rx_ctrl *)(ccb->ctrl))->rx_intr_ctr++;
760 bnad_netif_rx_schedule_poll(ccb->bnad, ccb);
766 /* Interrupt handlers */
768 /* Mbox Interrupt Handlers */
770 bnad_msix_mbox_handler(int irq, void *data)
774 struct bnad *bnad = (struct bnad *)data;
776 spin_lock_irqsave(&bnad->bna_lock, flags);
777 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
778 spin_unlock_irqrestore(&bnad->bna_lock, flags);
782 bna_intr_status_get(&bnad->bna, intr_status);
784 if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
785 bna_mbox_handler(&bnad->bna, intr_status);
787 spin_unlock_irqrestore(&bnad->bna_lock, flags);
793 bnad_isr(int irq, void *data)
798 struct bnad *bnad = (struct bnad *)data;
799 struct bnad_rx_info *rx_info;
800 struct bnad_rx_ctrl *rx_ctrl;
801 struct bna_tcb *tcb = NULL;
803 spin_lock_irqsave(&bnad->bna_lock, flags);
804 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
805 spin_unlock_irqrestore(&bnad->bna_lock, flags);
809 bna_intr_status_get(&bnad->bna, intr_status);
811 if (unlikely(!intr_status)) {
812 spin_unlock_irqrestore(&bnad->bna_lock, flags);
816 if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
817 bna_mbox_handler(&bnad->bna, intr_status);
819 spin_unlock_irqrestore(&bnad->bna_lock, flags);
821 if (!BNA_IS_INTX_DATA_INTR(intr_status))
824 /* Process data interrupts */
826 for (i = 0; i < bnad->num_tx; i++) {
827 for (j = 0; j < bnad->num_txq_per_tx; j++) {
828 tcb = bnad->tx_info[i].tcb[j];
829 if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
830 bnad_tx_complete(bnad, bnad->tx_info[i].tcb[j]);
834 for (i = 0; i < bnad->num_rx; i++) {
835 rx_info = &bnad->rx_info[i];
838 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
839 rx_ctrl = &rx_info->rx_ctrl[j];
841 bnad_netif_rx_schedule_poll(bnad,
849 * Called in interrupt / callback context
850 * with bna_lock held, so cfg_flags access is OK
853 bnad_enable_mbox_irq(struct bnad *bnad)
855 clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
857 BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
861 * Called with bnad->bna_lock held because of
862 * bnad->cfg_flags access.
865 bnad_disable_mbox_irq(struct bnad *bnad)
867 set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
869 BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
873 bnad_set_netdev_perm_addr(struct bnad *bnad)
875 struct net_device *netdev = bnad->netdev;
877 memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
878 if (is_zero_ether_addr(netdev->dev_addr))
879 memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
882 /* Control Path Handlers */
886 bnad_cb_mbox_intr_enable(struct bnad *bnad)
888 bnad_enable_mbox_irq(bnad);
892 bnad_cb_mbox_intr_disable(struct bnad *bnad)
894 bnad_disable_mbox_irq(bnad);
898 bnad_cb_ioceth_ready(struct bnad *bnad)
900 bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
901 complete(&bnad->bnad_completions.ioc_comp);
905 bnad_cb_ioceth_failed(struct bnad *bnad)
907 bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL;
908 complete(&bnad->bnad_completions.ioc_comp);
912 bnad_cb_ioceth_disabled(struct bnad *bnad)
914 bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
915 complete(&bnad->bnad_completions.ioc_comp);
919 bnad_cb_enet_disabled(void *arg)
921 struct bnad *bnad = (struct bnad *)arg;
923 netif_carrier_off(bnad->netdev);
924 complete(&bnad->bnad_completions.enet_comp);
928 bnad_cb_ethport_link_status(struct bnad *bnad,
929 enum bna_link_status link_status)
931 bool link_up = false;
933 link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);
935 if (link_status == BNA_CEE_UP) {
936 if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
937 BNAD_UPDATE_CTR(bnad, cee_toggle);
938 set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
940 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
941 BNAD_UPDATE_CTR(bnad, cee_toggle);
942 clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
946 if (!netif_carrier_ok(bnad->netdev)) {
948 printk(KERN_WARNING "bna: %s link up\n",
949 bnad->netdev->name);
950 netif_carrier_on(bnad->netdev);
951 BNAD_UPDATE_CTR(bnad, link_toggle);
952 for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
953 for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx;
955 struct bna_tcb *tcb =
956 bnad->tx_info[tx_id].tcb[tcb_id];
963 if (test_bit(BNAD_TXQ_TX_STARTED,
967 * Transmit Schedule */
968 printk(KERN_INFO "bna: %s %d "
975 BNAD_UPDATE_CTR(bnad,
981 BNAD_UPDATE_CTR(bnad,
988 if (netif_carrier_ok(bnad->netdev)) {
989 printk(KERN_WARNING "bna: %s link down\n",
990 bnad->netdev->name);
991 netif_carrier_off(bnad->netdev);
992 BNAD_UPDATE_CTR(bnad, link_toggle);
998 bnad_cb_tx_disabled(void *arg, struct bna_tx *tx)
1000 struct bnad *bnad = (struct bnad *)arg;
1002 complete(&bnad->bnad_completions.tx_comp);
1006 bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
1008 struct bnad_tx_info *tx_info =
1009 (struct bnad_tx_info *)tcb->txq->tx->priv;
1012 tx_info->tcb[tcb->id] = tcb;
1016 bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
1018 struct bnad_tx_info *tx_info =
1019 (struct bnad_tx_info *)tcb->txq->tx->priv;
1021 tx_info->tcb[tcb->id] = NULL;
1026 bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
1028 struct bnad_rx_info *rx_info =
1029 (struct bnad_rx_info *)ccb->cq->rx->priv;
1031 rx_info->rx_ctrl[ccb->id].ccb = ccb;
1032 ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
1036 bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
1038 struct bnad_rx_info *rx_info =
1039 (struct bnad_rx_info *)ccb->cq->rx->priv;
1041 rx_info->rx_ctrl[ccb->id].ccb = NULL;
1045 bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
1047 struct bnad_tx_info *tx_info =
1048 (struct bnad_tx_info *)tx->priv;
1049 struct bna_tcb *tcb;
1053 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1054 tcb = tx_info->tcb[i];
1058 clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
1059 netif_stop_subqueue(bnad->netdev, txq_id);
1060 printk(KERN_INFO "bna: %s %d TXQ_STOPPED\n",
1061 bnad->netdev->name, txq_id);
1066 bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
1068 struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
1069 struct bna_tcb *tcb;
1073 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1074 tcb = tx_info->tcb[i];
1079 BUG_ON(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags));
1080 set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
1081 BUG_ON(*(tcb->hw_consumer_index) != 0);
1083 if (netif_carrier_ok(bnad->netdev)) {
1084 printk(KERN_INFO "bna: %s %d TXQ_STARTED\n",
1085 bnad->netdev->name, txq_id);
1086 netif_wake_subqueue(bnad->netdev, txq_id);
1087 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
1092 * Workaround for the first ioceth enable failure, when we
1093 * get a 0 MAC address. We try to get the MAC address
1094 * again here.
1096 if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) {
1097 bna_enet_perm_mac_get(&bnad->bna.enet, &bnad->perm_addr);
1098 bnad_set_netdev_perm_addr(bnad);
1103 * Free all TxQ buffers and then notify TX_E_CLEANUP_DONE to the Tx fsm.
1106 bnad_tx_cleanup(struct delayed_work *work)
1108 struct bnad_tx_info *tx_info =
1109 container_of(work, struct bnad_tx_info, tx_cleanup_work);
1110 struct bnad *bnad = NULL;
1111 struct bna_tcb *tcb;
1112 unsigned long flags;
1115 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1116 tcb = tx_info->tcb[i];
1122 if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
1127 bnad_txq_cleanup(bnad, tcb);
1129 smp_mb__before_clear_bit();
1130 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
1134 queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work,
1135 msecs_to_jiffies(1));
1139 spin_lock_irqsave(&bnad->bna_lock, flags);
1140 bna_tx_cleanup_complete(tx_info->tx);
1141 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1145 bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
1147 struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
1148 struct bna_tcb *tcb;
1151 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1152 tcb = tx_info->tcb[i];
1157 queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, 0);
1161 bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
1163 struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1164 struct bna_ccb *ccb;
1165 struct bnad_rx_ctrl *rx_ctrl;
1168 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1169 rx_ctrl = &rx_info->rx_ctrl[i];
1174 clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[0]->flags);
1177 clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[1]->flags);
1182 * Free all RxQ buffers and then notify RX_E_CLEANUP_DONE to the Rx fsm.
1185 bnad_rx_cleanup(void *work)
1187 struct bnad_rx_info *rx_info =
1188 container_of(work, struct bnad_rx_info, rx_cleanup_work);
1189 struct bnad_rx_ctrl *rx_ctrl;
1190 struct bnad *bnad = NULL;
1191 unsigned long flags;
1194 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1195 rx_ctrl = &rx_info->rx_ctrl[i];
1200 bnad = rx_ctrl->ccb->bnad;
1203 * Wait till the poll handler has exited
1204 * and nothing can be scheduled anymore
1206 napi_disable(&rx_ctrl->napi);
1208 bnad_cq_cleanup(bnad, rx_ctrl->ccb);
1209 bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[0]);
1210 if (rx_ctrl->ccb->rcb[1])
1211 bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[1]);
1214 spin_lock_irqsave(&bnad->bna_lock, flags);
1215 bna_rx_cleanup_complete(rx_info->rx);
1216 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1220 bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
1222 struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1223 struct bna_ccb *ccb;
1224 struct bnad_rx_ctrl *rx_ctrl;
1227 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1228 rx_ctrl = &rx_info->rx_ctrl[i];
1233 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
1236 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
1239 queue_work(bnad->work_q, &rx_info->rx_cleanup_work);
1243 bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
1245 struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1246 struct bna_ccb *ccb;
1247 struct bna_rcb *rcb;
1248 struct bnad_rx_ctrl *rx_ctrl;
1251 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1252 rx_ctrl = &rx_info->rx_ctrl[i];
1257 napi_enable(&rx_ctrl->napi);
1259 for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
1264 bnad_rxq_alloc_init(bnad, rcb);
1265 set_bit(BNAD_RXQ_STARTED, &rcb->flags);
1266 set_bit(BNAD_RXQ_POST_OK, &rcb->flags);
1267 bnad_rxq_post(bnad, rcb);
1273 bnad_cb_rx_disabled(void *arg, struct bna_rx *rx)
1275 struct bnad *bnad = (struct bnad *)arg;
1277 complete(&bnad->bnad_completions.rx_comp);
1281 bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx)
1283 bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS;
1284 complete(&bnad->bnad_completions.mcast_comp);
1288 bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
1289 struct bna_stats *stats)
1291 if (status == BNA_CB_SUCCESS)
1292 BNAD_UPDATE_CTR(bnad, hw_stats_updates);
1294 if (!netif_running(bnad->netdev) ||
1295 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1298 mod_timer(&bnad->stats_timer,
1299 jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1303 bnad_cb_enet_mtu_set(struct bnad *bnad)
1305 bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS;
1306 complete(&bnad->bnad_completions.mtu_comp);
1310 bnad_cb_completion(void *arg, enum bfa_status status)
1312 struct bnad_iocmd_comp *iocmd_comp =
1313 (struct bnad_iocmd_comp *)arg;
1315 iocmd_comp->comp_status = (u32) status;
1316 complete(&iocmd_comp->comp);
1319 /* Resource allocation, free functions */
1322 bnad_mem_free(struct bnad *bnad,
1323 struct bna_mem_info *mem_info)
1328 if (mem_info->mdl == NULL)
1331 for (i = 0; i < mem_info->num; i++) {
1332 if (mem_info->mdl[i].kva != NULL) {
1333 if (mem_info->mem_type == BNA_MEM_T_DMA) {
1334 BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
1336 dma_free_coherent(&bnad->pcidev->dev,
1337 mem_info->mdl[i].len,
1338 mem_info->mdl[i].kva, dma_pa);
1340 kfree(mem_info->mdl[i].kva);
1343 kfree(mem_info->mdl);
1344 mem_info->mdl = NULL;
1348 bnad_mem_alloc(struct bnad *bnad,
1349 struct bna_mem_info *mem_info)
1354 if ((mem_info->num == 0) || (mem_info->len == 0)) {
1355 mem_info->mdl = NULL;
1359 mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
1361 if (mem_info->mdl == NULL)
1364 if (mem_info->mem_type == BNA_MEM_T_DMA) {
1365 for (i = 0; i < mem_info->num; i++) {
1366 mem_info->mdl[i].len = mem_info->len;
1367 mem_info->mdl[i].kva =
1368 dma_alloc_coherent(&bnad->pcidev->dev,
1369 mem_info->len, &dma_pa,
1371 if (mem_info->mdl[i].kva == NULL)
1374 BNA_SET_DMA_ADDR(dma_pa,
1375 &(mem_info->mdl[i].dma));
1378 for (i = 0; i < mem_info->num; i++) {
1379 mem_info->mdl[i].len = mem_info->len;
1380 mem_info->mdl[i].kva = kzalloc(mem_info->len,
1382 if (mem_info->mdl[i].kva == NULL)
1390 bnad_mem_free(bnad, mem_info);
1394 /* Free IRQ for Mailbox */
1396 bnad_mbox_irq_free(struct bnad *bnad)
1399 unsigned long flags;
1401 spin_lock_irqsave(&bnad->bna_lock, flags);
1402 bnad_disable_mbox_irq(bnad);
1403 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1405 irq = BNAD_GET_MBOX_IRQ(bnad);
1406 free_irq(irq, bnad);
1410 * Allocates IRQ for Mailbox, but keeps it disabled
1411 * This will be enabled once we get the mbox enable callback
1415 bnad_mbox_irq_alloc(struct bnad *bnad)
1418 unsigned long irq_flags, flags;
1420 irq_handler_t irq_handler;
1422 spin_lock_irqsave(&bnad->bna_lock, flags);
1423 if (bnad->cfg_flags & BNAD_CF_MSIX) {
1424 irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
1425 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
1428 irq_handler = (irq_handler_t)bnad_isr;
1429 irq = bnad->pcidev->irq;
1430 irq_flags = IRQF_SHARED;
1433 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1434 sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
1437 * Set the Mbox IRQ disable flag, so that the IRQ handler
1438 * called from request_irq() for SHARED IRQs does not execute
1440 set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
1442 BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
1444 err = request_irq(irq, irq_handler, irq_flags,
1445 bnad->mbox_irq_name, bnad);
1451 bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
1453 kfree(intr_info->idl);
1454 intr_info->idl = NULL;
1457 /* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
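/*
 * MSI-X vector layout: the mailbox vector(s) come first, followed by one
 * vector per TxQ and then one per RxP; vector_start below indexes into that
 * flat msix_table.
 */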
1459 bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
1460 u32 txrx_id, struct bna_intr_info *intr_info)
1462 int i, vector_start = 0;
1464 unsigned long flags;
1466 spin_lock_irqsave(&bnad->bna_lock, flags);
1467 cfg_flags = bnad->cfg_flags;
1468 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1470 if (cfg_flags & BNAD_CF_MSIX) {
1471 intr_info->intr_type = BNA_INTR_T_MSIX;
1472 intr_info->idl = kcalloc(intr_info->num,
1473 sizeof(struct bna_intr_descr),
1475 if (!intr_info->idl)
1480 vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id;
1484 vector_start = BNAD_MAILBOX_MSIX_VECTORS +
1485 (bnad->num_tx * bnad->num_txq_per_tx) +
1493 for (i = 0; i < intr_info->num; i++)
1494 intr_info->idl[i].vector = vector_start + i;
1496 intr_info->intr_type = BNA_INTR_T_INTX;
1498 intr_info->idl = kcalloc(intr_info->num,
1499 sizeof(struct bna_intr_descr),
1501 if (!intr_info->idl)
1506 intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK;
1510 intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK;
1517 /* NOTE: Should be called for MSIX only
1518 * Unregisters Tx MSIX vector(s) from the kernel
1521 bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
1527 for (i = 0; i < num_txqs; i++) {
1528 if (tx_info->tcb[i] == NULL)
1531 vector_num = tx_info->tcb[i]->intr_vector;
1532 free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
1536 /* NOTE: Should be called for MSIX only
1537 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
1540 bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
1541 u32 tx_id, int num_txqs)
1547 for (i = 0; i < num_txqs; i++) {
1548 vector_num = tx_info->tcb[i]->intr_vector;
1549 sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
1550 tx_id + tx_info->tcb[i]->id);
1551 err = request_irq(bnad->msix_table[vector_num].vector,
1552 (irq_handler_t)bnad_msix_tx, 0,
1553 tx_info->tcb[i]->name,
1563 bnad_tx_msix_unregister(bnad, tx_info, (i - 1));
1567 /* NOTE: Should be called for MSIX only
1568 * Unregisters Rx MSIX vector(s) from the kernel
1571 bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
1577 for (i = 0; i < num_rxps; i++) {
1578 if (rx_info->rx_ctrl[i].ccb == NULL)
1581 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1582 free_irq(bnad->msix_table[vector_num].vector,
1583 rx_info->rx_ctrl[i].ccb);
1587 /* NOTE: Should be called for MSIX only
1588 * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
1591 bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
1592 u32 rx_id, int num_rxps)
1598 for (i = 0; i < num_rxps; i++) {
1599 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1600 sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
1602 rx_id + rx_info->rx_ctrl[i].ccb->id);
1603 err = request_irq(bnad->msix_table[vector_num].vector,
1604 (irq_handler_t)bnad_msix_rx, 0,
1605 rx_info->rx_ctrl[i].ccb->name,
1606 rx_info->rx_ctrl[i].ccb);
1615 bnad_rx_msix_unregister(bnad, rx_info, (i - 1));
1619 /* Free Tx object Resources */
1621 bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1625 for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1626 if (res_info[i].res_type == BNA_RES_T_MEM)
1627 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1628 else if (res_info[i].res_type == BNA_RES_T_INTR)
1629 bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1633 /* Allocates memory and interrupt resources for Tx object */
1635 bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1640 for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1641 if (res_info[i].res_type == BNA_RES_T_MEM)
1642 err = bnad_mem_alloc(bnad,
1643 &res_info[i].res_u.mem_info);
1644 else if (res_info[i].res_type == BNA_RES_T_INTR)
1645 err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
1646 &res_info[i].res_u.intr_info);
1653 bnad_tx_res_free(bnad, res_info);
1657 /* Free Rx object Resources */
1659 bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1663 for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1664 if (res_info[i].res_type == BNA_RES_T_MEM)
1665 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1666 else if (res_info[i].res_type == BNA_RES_T_INTR)
1667 bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1671 /* Allocates memory and interrupt resources for Rx object */
1673 bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1678 /* All memory needs to be allocated before setup_ccbs */
1679 for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1680 if (res_info[i].res_type == BNA_RES_T_MEM)
1681 err = bnad_mem_alloc(bnad,
1682 &res_info[i].res_u.mem_info);
1683 else if (res_info[i].res_type == BNA_RES_T_INTR)
1684 err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
1685 &res_info[i].res_u.intr_info);
1692 bnad_rx_res_free(bnad, res_info);
1696 /* Timer callbacks */
1699 bnad_ioc_timeout(unsigned long data)
1701 struct bnad *bnad = (struct bnad *)data;
1702 unsigned long flags;
1704 spin_lock_irqsave(&bnad->bna_lock, flags);
1705 bfa_nw_ioc_timeout((void *) &bnad->bna.ioceth.ioc);
1706 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1710 bnad_ioc_hb_check(unsigned long data)
1712 struct bnad *bnad = (struct bnad *)data;
1713 unsigned long flags;
1715 spin_lock_irqsave(&bnad->bna_lock, flags);
1716 bfa_nw_ioc_hb_check((void *) &bnad->bna.ioceth.ioc);
1717 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1721 bnad_iocpf_timeout(unsigned long data)
1723 struct bnad *bnad = (struct bnad *)data;
1724 unsigned long flags;
1726 spin_lock_irqsave(&bnad->bna_lock, flags);
1727 bfa_nw_iocpf_timeout((void *) &bnad->bna.ioceth.ioc);
1728 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1732 bnad_iocpf_sem_timeout(unsigned long data)
1734 struct bnad *bnad = (struct bnad *)data;
1735 unsigned long flags;
1737 spin_lock_irqsave(&bnad->bna_lock, flags);
1738 bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.ioceth.ioc);
1739 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1743 * All timer routines use bnad->bna_lock to protect against
1744 * the following race, which may occur in case of no locking: one CPU
1745 * tests the timer-running flag and re-arms the timer with mod_timer(),
1746 * while another CPU has meanwhile cleared the flag and called
1747 * del_timer_sync().
1752 /* b) Dynamic Interrupt Moderation Timer */
1754 bnad_dim_timeout(unsigned long data)
1756 struct bnad *bnad = (struct bnad *)data;
1757 struct bnad_rx_info *rx_info;
1758 struct bnad_rx_ctrl *rx_ctrl;
1760 unsigned long flags;
1762 if (!netif_carrier_ok(bnad->netdev))
1765 spin_lock_irqsave(&bnad->bna_lock, flags);
1766 for (i = 0; i < bnad->num_rx; i++) {
1767 rx_info = &bnad->rx_info[i];
1770 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
1771 rx_ctrl = &rx_info->rx_ctrl[j];
1774 bna_rx_dim_update(rx_ctrl->ccb);
1778 /* Check for BNAD_CF_DIM_ENABLED, does not eliminate a race */
1779 if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
1780 mod_timer(&bnad->dim_timer,
1781 jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1782 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1785 /* c) Statistics Timer */
1787 bnad_stats_timeout(unsigned long data)
1789 struct bnad *bnad = (struct bnad *)data;
1790 unsigned long flags;
1792 if (!netif_running(bnad->netdev) ||
1793 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1796 spin_lock_irqsave(&bnad->bna_lock, flags);
1797 bna_hw_stats_get(&bnad->bna);
1798 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1802 * Set up timer for DIM
1803 * Called with bnad->bna_lock held
1806 bnad_dim_timer_start(struct bnad *bnad)
1808 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1809 !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
1810 setup_timer(&bnad->dim_timer, bnad_dim_timeout,
1811 (unsigned long)bnad);
1812 set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1813 mod_timer(&bnad->dim_timer,
1814 jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1819 * Set up timer for statistics
1820 * Called with mutex_lock(&bnad->conf_mutex) held
1823 bnad_stats_timer_start(struct bnad *bnad)
1825 unsigned long flags;
1827 spin_lock_irqsave(&bnad->bna_lock, flags);
1828 if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
1829 setup_timer(&bnad->stats_timer, bnad_stats_timeout,
1830 (unsigned long)bnad);
1831 mod_timer(&bnad->stats_timer,
1832 jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1834 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1838 * Stops the stats timer
1839 * Called with mutex_lock(&bnad->conf_mutex) held
1842 bnad_stats_timer_stop(struct bnad *bnad)
1845 unsigned long flags;
1847 spin_lock_irqsave(&bnad->bna_lock, flags);
1848 if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1850 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1852 del_timer_sync(&bnad->stats_timer);
1858 bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
1860 int i = 1; /* Index 0 has broadcast address */
1861 struct netdev_hw_addr *mc_addr;
1863 netdev_for_each_mc_addr(mc_addr, netdev) {
1864 memcpy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0],
1871 bnad_napi_poll_rx(struct napi_struct *napi, int budget)
1873 struct bnad_rx_ctrl *rx_ctrl =
1874 container_of(napi, struct bnad_rx_ctrl, napi);
1875 struct bnad *bnad = rx_ctrl->bnad;
1878 rx_ctrl->rx_poll_ctr++;
1880 if (!netif_carrier_ok(bnad->netdev))
1883 rcvd = bnad_cq_process(bnad, rx_ctrl->ccb, budget);
1888 napi_complete(napi);
1890 rx_ctrl->rx_complete++;
1893 bnad_enable_rx_irq_unsafe(rx_ctrl->ccb);
1898 #define BNAD_NAPI_POLL_QUOTA 64
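/* NAPI weight passed to netif_napi_add(); it bounds the per-poll budget given to bnad_napi_poll_rx(). */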
1900 bnad_napi_add(struct bnad *bnad, u32 rx_id)
1902 struct bnad_rx_ctrl *rx_ctrl;
1905 /* Initialize & enable NAPI */
1906 for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1907 rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
1908 netif_napi_add(bnad->netdev, &rx_ctrl->napi,
1909 bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA);
1914 bnad_napi_delete(struct bnad *bnad, u32 rx_id)
1918 /* First disable and then clean up */
1919 for (i = 0; i < bnad->num_rxp_per_rx; i++)
1920 netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1923 /* Should be called with conf_lock held */
1925 bnad_destroy_tx(struct bnad *bnad, u32 tx_id)
1927 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1928 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1929 unsigned long flags;
1934 init_completion(&bnad->bnad_completions.tx_comp);
1935 spin_lock_irqsave(&bnad->bna_lock, flags);
1936 bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
1937 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1938 wait_for_completion(&bnad->bnad_completions.tx_comp);
1940 if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
1941 bnad_tx_msix_unregister(bnad, tx_info,
1942 bnad->num_txq_per_tx);
1944 spin_lock_irqsave(&bnad->bna_lock, flags);
1945 bna_tx_destroy(tx_info->tx);
1946 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1951 bnad_tx_res_free(bnad, res_info);
1954 /* Should be called with conf_lock held */
1956 bnad_setup_tx(struct bnad *bnad, u32 tx_id)
1959 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1960 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1961 struct bna_intr_info *intr_info =
1962 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
1963 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
1964 static const struct bna_tx_event_cbfn tx_cbfn = {
1965 .tcb_setup_cbfn = bnad_cb_tcb_setup,
1966 .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
1967 .tx_stall_cbfn = bnad_cb_tx_stall,
1968 .tx_resume_cbfn = bnad_cb_tx_resume,
1969 .tx_cleanup_cbfn = bnad_cb_tx_cleanup,
1973 unsigned long flags;
1975 tx_info->tx_id = tx_id;
1977 /* Initialize the Tx object configuration */
1978 tx_config->num_txq = bnad->num_txq_per_tx;
1979 tx_config->txq_depth = bnad->txq_depth;
1980 tx_config->tx_type = BNA_TX_T_REGULAR;
1981 tx_config->coalescing_timeo = bnad->tx_coalescing_timeo;
1983 /* Get BNA's resource requirement for one tx object */
1984 spin_lock_irqsave(&bnad->bna_lock, flags);
1985 bna_tx_res_req(bnad->num_txq_per_tx,
1986 bnad->txq_depth, res_info);
1987 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1989 /* Fill Unmap Q memory requirements */
1990 BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_TX_RES_MEM_T_UNMAPQ],
1991 bnad->num_txq_per_tx, (sizeof(struct bnad_tx_unmap) *
1992 bnad->txq_depth));
1994 /* Allocate resources */
1995 err = bnad_tx_res_alloc(bnad, res_info, tx_id);
1999 /* Ask BNA to create one Tx object, supplying required resources */
2000 spin_lock_irqsave(&bnad->bna_lock, flags);
2001 tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
2003 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2010 INIT_DELAYED_WORK(&tx_info->tx_cleanup_work,
2011 (work_func_t)bnad_tx_cleanup);
2013 /* Register ISR for the Tx object */
2014 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
2015 err = bnad_tx_msix_register(bnad, tx_info,
2016 tx_id, bnad->num_txq_per_tx);
2021 spin_lock_irqsave(&bnad->bna_lock, flags);
2023 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2028 spin_lock_irqsave(&bnad->bna_lock, flags);
2029 bna_tx_destroy(tx_info->tx);
2030 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2034 bnad_tx_res_free(bnad, res_info);
2038 /* Setup the rx config for bna_rx_create */
2039 /* bnad decides the configuration */
2041 bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
2043 memset(rx_config, 0, sizeof(*rx_config));
2044 rx_config->rx_type = BNA_RX_T_REGULAR;
2045 rx_config->num_paths = bnad->num_rxp_per_rx;
2046 rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;
2048 if (bnad->num_rxp_per_rx > 1) {
2049 rx_config->rss_status = BNA_STATUS_T_ENABLED;
2050 rx_config->rss_config.hash_type =
2051 (BFI_ENET_RSS_IPV6 |
2052 BFI_ENET_RSS_IPV6_TCP |
2054 BFI_ENET_RSS_IPV4_TCP);
2055 rx_config->rss_config.hash_mask =
2056 bnad->num_rxp_per_rx - 1;
2057 get_random_bytes(rx_config->rss_config.toeplitz_hash_key,
2058 sizeof(rx_config->rss_config.toeplitz_hash_key));
2060 rx_config->rss_status = BNA_STATUS_T_DISABLED;
2061 memset(&rx_config->rss_config, 0,
2062 sizeof(rx_config->rss_config));
2065 rx_config->frame_size = BNAD_FRAME_SIZE(bnad->netdev->mtu);
2066 rx_config->q0_multi_buf = BNA_STATUS_T_DISABLED;
2068 /* BNA_RXP_SINGLE - one data-buffer queue
2069 * BNA_RXP_SLR - one small-buffer and one large-buffer queues
2070 * BNA_RXP_HDS - one header-buffer and one data-buffer queues
2072 /* TODO: configurable param for queue type */
2073 rx_config->rxp_type = BNA_RXP_SLR;
2075 if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
2076 rx_config->frame_size > 4096) {
2077 /* though size_routing_enable is set in SLR,
2078 * small packets may get routed to same rxq.
2079 * set buf_size to 2048 instead of PAGE_SIZE.
2081 rx_config->q0_buf_size = 2048;
2082 /* this should be in multiples of 2 */
2083 rx_config->q0_num_vecs = 4;
2084 rx_config->q0_depth = bnad->rxq_depth * rx_config->q0_num_vecs;
2085 rx_config->q0_multi_buf = BNA_STATUS_T_ENABLED;
2087 rx_config->q0_buf_size = rx_config->frame_size;
2088 rx_config->q0_num_vecs = 1;
2089 rx_config->q0_depth = bnad->rxq_depth;
2092 /* initialize for q1 for BNA_RXP_SLR/BNA_RXP_HDS */
2093 if (rx_config->rxp_type == BNA_RXP_SLR) {
2094 rx_config->q1_depth = bnad->rxq_depth;
2095 rx_config->q1_buf_size = BFI_SMALL_RXBUF_SIZE;
2098 rx_config->vlan_strip_status =
2099 (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) ?
2100 BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED;
2104 bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
2106 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2109 for (i = 0; i < bnad->num_rxp_per_rx; i++)
2110 rx_info->rx_ctrl[i].bnad = bnad;
2113 /* Called with mutex_lock(&bnad->conf_mutex) held */
2115 bnad_reinit_rx(struct bnad *bnad)
2117 struct net_device *netdev = bnad->netdev;
2118 u32 err = 0, current_err = 0;
2119 u32 rx_id = 0, count = 0;
2120 unsigned long flags;
2122 /* destroy and create new rx objects */
2123 for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
2124 if (!bnad->rx_info[rx_id].rx)
2126 bnad_destroy_rx(bnad, rx_id);
2129 spin_lock_irqsave(&bnad->bna_lock, flags);
2130 bna_enet_mtu_set(&bnad->bna.enet,
2131 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
2132 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2134 for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
2136 current_err = bnad_setup_rx(bnad, rx_id);
2137 if (current_err && !err) {
2138 err = current_err;
2139 pr_err("RXQ:%u setup failed\n", rx_id);
2143 /* restore rx configuration */
2144 if (bnad->rx_info[0].rx && !err) {
2145 bnad_restore_vlans(bnad, 0);
2146 bnad_enable_default_bcast(bnad);
2147 spin_lock_irqsave(&bnad->bna_lock, flags);
2148 bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2149 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2150 bnad_set_rx_mode(netdev);
2156 /* Called with bnad_conf_lock() held */
2158 bnad_destroy_rx(struct bnad *bnad, u32 rx_id)
2160 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2161 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
2162 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
2163 unsigned long flags;
2170 spin_lock_irqsave(&bnad->bna_lock, flags);
2171 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
2172 test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
2173 clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
2176 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2178 del_timer_sync(&bnad->dim_timer);
2181 init_completion(&bnad->bnad_completions.rx_comp);
2182 spin_lock_irqsave(&bnad->bna_lock, flags);
2183 bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
2184 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2185 wait_for_completion(&bnad->bnad_completions.rx_comp);
2187 if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
2188 bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
2190 bnad_napi_delete(bnad, rx_id);
2192 spin_lock_irqsave(&bnad->bna_lock, flags);
2193 bna_rx_destroy(rx_info->rx);
2197 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2199 bnad_rx_res_free(bnad, res_info);
2202 /* Called with mutex_lock(&bnad->conf_mutex) held */
2204 bnad_setup_rx(struct bnad *bnad, u32 rx_id)
2207 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2208 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
2209 struct bna_intr_info *intr_info =
2210 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
2211 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
2212 static const struct bna_rx_event_cbfn rx_cbfn = {
2213 .rcb_setup_cbfn = NULL,
2214 .rcb_destroy_cbfn = NULL,
2215 .ccb_setup_cbfn = bnad_cb_ccb_setup,
2216 .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
2217 .rx_stall_cbfn = bnad_cb_rx_stall,
2218 .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
2219 .rx_post_cbfn = bnad_cb_rx_post,
2222 unsigned long flags;
2224 rx_info->rx_id = rx_id;
2226 /* Initialize the Rx object configuration */
2227 bnad_init_rx_config(bnad, rx_config);
2229 /* Get BNA's resource requirement for one Rx object */
2230 spin_lock_irqsave(&bnad->bna_lock, flags);
2231 bna_rx_res_req(rx_config, res_info);
2232 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2234 /* Fill Unmap Q memory requirements */
2235 BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPDQ],
2236 rx_config->num_paths,
2237 (rx_config->q0_depth *
2238 sizeof(struct bnad_rx_unmap)) +
2239 sizeof(struct bnad_rx_unmap_q));
2241 if (rx_config->rxp_type != BNA_RXP_SINGLE) {
2242 BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPHQ],
2243 rx_config->num_paths,
2244 (rx_config->q1_depth *
2245 sizeof(struct bnad_rx_unmap) +
2246 sizeof(struct bnad_rx_unmap_q)));
2248 /* Allocate resource */
2249 err = bnad_rx_res_alloc(bnad, res_info, rx_id);
2253 bnad_rx_ctrl_init(bnad, rx_id);
2255 /* Ask BNA to create one Rx object, supplying required resources */
2256 spin_lock_irqsave(&bnad->bna_lock, flags);
2257 rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
2261 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2265 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2267 INIT_WORK(&rx_info->rx_cleanup_work,
2268 (work_func_t)(bnad_rx_cleanup));
2271 * Init NAPI, so that state is set to NAPI_STATE_SCHED,
2272 * and the IRQ handler cannot schedule NAPI at this point.
2274 bnad_napi_add(bnad, rx_id);
2276 /* Register ISR for the Rx object */
2277 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
2278 err = bnad_rx_msix_register(bnad, rx_info, rx_id,
2279 rx_config->num_paths);
2284 spin_lock_irqsave(&bnad->bna_lock, flags);
2286 /* Set up Dynamic Interrupt Moderation Vector */
2287 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
2288 bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
2290 /* Enable VLAN filtering only on the default Rx */
2291 bna_rx_vlanfilter_enable(rx);
2293 /* Start the DIM timer */
2294 bnad_dim_timer_start(bnad);
2298 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2303 bnad_destroy_rx(bnad, rx_id);
2307 /* Called with conf_lock & bnad->bna_lock held */
2309 bnad_tx_coalescing_timeo_set(struct bnad *bnad)
2311 struct bnad_tx_info *tx_info;
2313 tx_info = &bnad->tx_info[0];
2317 bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
2320 /* Called with conf_lock & bnad->bna_lock held */
2322 bnad_rx_coalescing_timeo_set(struct bnad *bnad)
2324 struct bnad_rx_info *rx_info;
2327 for (i = 0; i < bnad->num_rx; i++) {
2328 rx_info = &bnad->rx_info[i];
2331 bna_rx_coalescing_timeo_set(rx_info->rx,
2332 bnad->rx_coalescing_timeo);
2337 * Called with bnad->bna_lock held
2340 bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
2344 if (!is_valid_ether_addr(mac_addr))
2345 return -EADDRNOTAVAIL;
2347 /* If datapath is down, pretend everything went through */
2348 if (!bnad->rx_info[0].rx)
2351 ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL);
2352 if (ret != BNA_CB_SUCCESS)
2353 return -EADDRNOTAVAIL;
2358 /* Should be called with conf_lock held */
2360 bnad_enable_default_bcast(struct bnad *bnad)
2362 struct bnad_rx_info *rx_info = &bnad->rx_info[0];
2364 unsigned long flags;
2366 init_completion(&bnad->bnad_completions.mcast_comp);
2368 spin_lock_irqsave(&bnad->bna_lock, flags);
2369 ret = bna_rx_mcast_add(rx_info->rx, (u8 *)bnad_bcast_addr,
2370 bnad_cb_rx_mcast_add);
2371 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2373 if (ret == BNA_CB_SUCCESS)
2374 wait_for_completion(&bnad->bnad_completions.mcast_comp);
2378 if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
2384 /* Called with mutex_lock(&bnad->conf_mutex) held */
2386 bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
2389 unsigned long flags;
2391 for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
2392 spin_lock_irqsave(&bnad->bna_lock, flags);
2393 bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid);
2394 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2398 /* Statistics utilities */
2400 bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2404 for (i = 0; i < bnad->num_rx; i++) {
2405 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
2406 if (bnad->rx_info[i].rx_ctrl[j].ccb) {
2407 stats->rx_packets += bnad->rx_info[i].
2408 rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
2409 stats->rx_bytes += bnad->rx_info[i].
2410 rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
2411 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
2412 bnad->rx_info[i].rx_ctrl[j].ccb->
2414 stats->rx_packets +=
2415 bnad->rx_info[i].rx_ctrl[j].
2416 ccb->rcb[1]->rxq->rx_packets;
2417 stats->rx_bytes +=
2418 bnad->rx_info[i].rx_ctrl[j].
2419 ccb->rcb[1]->rxq->rx_bytes;
2424 for (i = 0; i < bnad->num_tx; i++) {
2425 for (j = 0; j < bnad->num_txq_per_tx; j++) {
2426 if (bnad->tx_info[i].tcb[j]) {
2427 stats->tx_packets +=
2428 bnad->tx_info[i].tcb[j]->txq->tx_packets;
2429 stats->tx_bytes +=
2430 bnad->tx_info[i].tcb[j]->txq->tx_bytes;
2437 * Must be called with the bna_lock held.
2440 bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2442 struct bfi_enet_stats_mac *mac_stats;
2446 mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats;
2447 stats->rx_errors =
2448 mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
2449 mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
2450 mac_stats->rx_undersize;
2451 stats->tx_errors = mac_stats->tx_fcs_error +
2452 mac_stats->tx_undersize;
2453 stats->rx_dropped = mac_stats->rx_drop;
2454 stats->tx_dropped = mac_stats->tx_drop;
2455 stats->multicast = mac_stats->rx_multicast;
2456 stats->collisions = mac_stats->tx_total_collision;
2458 stats->rx_length_errors = mac_stats->rx_frame_length_error;
2460 /* receive ring buffer overflow ?? */
2462 stats->rx_crc_errors = mac_stats->rx_fcs_error;
2463 stats->rx_frame_errors = mac_stats->rx_alignment_error;
2464 /* receiver FIFO overrun */
2465 bmap = bna_rx_rid_mask(&bnad->bna);
2466 for (i = 0; bmap; i++) {
2468 stats->rx_fifo_errors +=
2469 bnad->stats.bna_stats->
2470 hw_stats.rxf_stats[i].frame_drops;
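/* Wait for a possibly in-flight mailbox interrupt handler to finish. */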
2478 bnad_mbox_irq_sync(struct bnad *bnad)
2481 unsigned long flags;
2483 spin_lock_irqsave(&bnad->bna_lock, flags);
2484 if (bnad->cfg_flags & BNAD_CF_MSIX)
2485 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
2487 irq = bnad->pcidev->irq;
2488 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2490 synchronize_irq(irq);
2493 /* Utility used by bnad_start_xmit, for doing TSO */
2495 bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
2499 if (skb_header_cloned(skb)) {
2500 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2502 BNAD_UPDATE_CTR(bnad, tso_err);
2508 * For TSO, the TCP checksum field is seeded with pseudo-header sum
2509 * excluding the length field.
2511 if (skb->protocol == htons(ETH_P_IP)) {
2512 struct iphdr *iph = ip_hdr(skb);
2514 /* Do we really need these? */
2518 tcp_hdr(skb)->check =
2519 ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
2521 BNAD_UPDATE_CTR(bnad, tso4);
2523 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
2525 ipv6h->payload_len = 0;
2526 tcp_hdr(skb)->check =
2527 ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
2529 BNAD_UPDATE_CTR(bnad, tso6);
2536 * Initialize Q numbers depending on Rx Paths
2537 * Called with bnad->bna_lock held, because of cfg_flags
2541 bnad_q_num_init(struct bnad *bnad)
2545 rxps = min((uint)num_online_cpus(),
2546 (uint)(BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX));
2548 if (!(bnad->cfg_flags & BNAD_CF_MSIX))
2549 rxps = 1; /* INTx */
2553 bnad->num_rxp_per_rx = rxps;
2554 bnad->num_txq_per_tx = BNAD_TXQ_NUM;
2558 * Adjusts the Q numbers, given a number of msix vectors
2559 * Give preference to RSS as opposed to Tx priority Queues;
2560 * in such a case, just use 1 Tx Q.
2561 * Called with bnad->bna_lock held because of cfg_flags access.
2564 bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp)
2566 bnad->num_txq_per_tx = 1;
2567 if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) +
2568 bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
2569 (bnad->cfg_flags & BNAD_CF_MSIX)) {
2570 bnad->num_rxp_per_rx = msix_vectors -
2571 (bnad->num_tx * bnad->num_txq_per_tx) -
2572 BNAD_MAILBOX_MSIX_VECTORS;
2574 bnad->num_rxp_per_rx = 1;
2577 /* Enable / disable ioceth */
2578 static int
2579 bnad_ioceth_disable(struct bnad *bnad)
2580 {
2581 unsigned long flags;
2582 int err = 0;
2584 spin_lock_irqsave(&bnad->bna_lock, flags);
2585 init_completion(&bnad->bnad_completions.ioc_comp);
2586 bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP);
2587 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2589 wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2590 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2592 err = bnad->bnad_completions.ioc_comp_status;
2593 return err;
2594 }
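/* Bring up the IOC Ethernet block and wait (with a timeout) for the
 * completion posted by the enable callback.
 */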
2596 static int
2597 bnad_ioceth_enable(struct bnad *bnad)
2598 {
2599 int err = 0;
2600 unsigned long flags;
2602 spin_lock_irqsave(&bnad->bna_lock, flags);
2603 init_completion(&bnad->bnad_completions.ioc_comp);
2604 bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING;
2605 bna_ioceth_enable(&bnad->bna.ioceth);
2606 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2608 wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2609 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2611 err = bnad->bnad_completions.ioc_comp_status;
2613 return err;
2614 }
2616 /* Free BNA resources */
2617 static void
2618 bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info,
2619 u32 res_val_max)
2620 {
2621 int i;
2623 for (i = 0; i < res_val_max; i++)
2624 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
2625 }
2627 /* Allocates memory and interrupt resources for BNA */
2628 static int
2629 bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
2630 u32 res_val_max)
2631 {
2632 int i, err;
2634 for (i = 0; i < res_val_max; i++) {
2635 err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
2636 if (err)
2637 goto err_return;
2638 }
2639 return 0;
2641 err_return:
2642 bnad_res_free(bnad, res_info, res_val_max);
2643 return err;
2644 }
2646 /* Interrupt enable / disable */
2647 static void
2648 bnad_enable_msix(struct bnad *bnad)
2649 {
2650 int i, ret;
2651 unsigned long flags;
2653 spin_lock_irqsave(&bnad->bna_lock, flags);
2654 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2655 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2656 return;
2657 }
2658 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2660 if (bnad->msix_table)
2661 return;
2663 bnad->msix_table =
2664 kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
2666 if (!bnad->msix_table)
2667 goto intx_mode;
2669 for (i = 0; i < bnad->msix_num; i++)
2670 bnad->msix_table[i].entry = i;
2672 ret = pci_enable_msix_range(bnad->pcidev, bnad->msix_table,
2673 1, bnad->msix_num);
2674 if (ret < 0) {
2675 goto intx_mode;
2676 } else if (ret < bnad->msix_num) {
2677 pr_warn("BNA: %d MSI-X vectors allocated < %d requested\n",
2678 ret, bnad->msix_num);
2680 spin_lock_irqsave(&bnad->bna_lock, flags);
2681 /* ret = #of vectors that we got */
2682 bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2,
2683 (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2);
2684 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2686 bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
2687 BNAD_MAILBOX_MSIX_VECTORS;
2689 if (bnad->msix_num > ret) {
2690 pci_disable_msix(bnad->pcidev);
2691 goto intx_mode;
2692 }
2693 }
2695 pci_intx(bnad->pcidev, 0);
2697 return;
2699 intx_mode:
2700 pr_warn("BNA: MSI-X enable failed - operating in INTx mode\n");
2702 kfree(bnad->msix_table);
2703 bnad->msix_table = NULL;
2704 bnad->msix_num = 0;
2705 spin_lock_irqsave(&bnad->bna_lock, flags);
2706 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2707 bnad_q_num_init(bnad);
2708 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2709 }
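/* Undo MSI-X setup: clear the MSIX config flag under the lock, then
 * release the vectors and free the vector table.
 */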
2711 static void
2712 bnad_disable_msix(struct bnad *bnad)
2713 {
2714 u32 cfg_flags;
2715 unsigned long flags;
2717 spin_lock_irqsave(&bnad->bna_lock, flags);
2718 cfg_flags = bnad->cfg_flags;
2719 if (bnad->cfg_flags & BNAD_CF_MSIX)
2720 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2721 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2723 if (cfg_flags & BNAD_CF_MSIX) {
2724 pci_disable_msix(bnad->pcidev);
2725 kfree(bnad->msix_table);
2726 bnad->msix_table = NULL;
2727 }
2728 }
2730 /* Netdev entry points */
2732 bnad_open(struct net_device *netdev)
2735 struct bnad *bnad = netdev_priv(netdev);
2736 struct bna_pause_config pause_config;
2737 unsigned long flags;
2739 mutex_lock(&bnad->conf_mutex);
2742 err = bnad_setup_tx(bnad, 0);
2743 if (err)
2744 goto err_return;
2747 err = bnad_setup_rx(bnad, 0);
2748 if (err)
2749 goto cleanup_tx;
2752 pause_config.tx_pause = 0;
2753 pause_config.rx_pause = 0;
2755 spin_lock_irqsave(&bnad->bna_lock, flags);
2756 bna_enet_mtu_set(&bnad->bna.enet,
2757 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
2758 bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
2759 bna_enet_enable(&bnad->bna.enet);
2760 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2762 /* Enable broadcast */
2763 bnad_enable_default_bcast(bnad);
2765 /* Restore VLANs, if any */
2766 bnad_restore_vlans(bnad, 0);
2768 /* Set the UCAST address */
2769 spin_lock_irqsave(&bnad->bna_lock, flags);
2770 bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2771 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2773 /* Start the stats timer */
2774 bnad_stats_timer_start(bnad);
2776 mutex_unlock(&bnad->conf_mutex);
2778 return 0;
2780 cleanup_tx:
2781 bnad_destroy_tx(bnad, 0);
2783 err_return:
2784 mutex_unlock(&bnad->conf_mutex);
2785 return err;
2786 }
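/* ndo_stop: stop the stats timer, disable the enet block and wait for it,
 * tear down Tx/Rx and synchronize the mailbox IRQ.
 */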
2789 bnad_stop(struct net_device *netdev)
2791 struct bnad *bnad = netdev_priv(netdev);
2792 unsigned long flags;
2794 mutex_lock(&bnad->conf_mutex);
2796 /* Stop the stats timer */
2797 bnad_stats_timer_stop(bnad);
2799 init_completion(&bnad->bnad_completions.enet_comp);
2801 spin_lock_irqsave(&bnad->bna_lock, flags);
2802 bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP,
2803 bnad_cb_enet_disabled);
2804 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2806 wait_for_completion(&bnad->bnad_completions.enet_comp);
2808 bnad_destroy_tx(bnad, 0);
2809 bnad_destroy_rx(bnad, 0);
2811 /* Synchronize mailbox IRQ */
2812 bnad_mbox_irq_sync(bnad);
2814 mutex_unlock(&bnad->conf_mutex);
2816 return 0;
2817 }
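/* Parses the skb for VLAN tag, TSO and checksum-offload requirements and
 * programs the opcode, flags and header-offset fields of the Tx work item.
 */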
2820 /* Returns 0 for success */
2821 static int
2822 bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
2823 struct sk_buff *skb, struct bna_txq_entry *txqent)
2824 {
2825 u16 flags = 0;
2826 u32 gso_size;
2827 u16 vlan_tag = 0;
2829 if (vlan_tx_tag_present(skb)) {
2830 vlan_tag = (u16)vlan_tx_tag_get(skb);
2831 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2833 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
2834 vlan_tag = ((tcb->priority & 0x7) << VLAN_PRIO_SHIFT)
2835 | (vlan_tag & 0x1fff);
2836 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2838 txqent->hdr.wi.vlan_tag = htons(vlan_tag);
2840 if (skb_is_gso(skb)) {
2841 gso_size = skb_shinfo(skb)->gso_size;
2842 if (unlikely(gso_size > bnad->netdev->mtu)) {
2843 BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
2844 return -EINVAL;
2845 }
2846 if (unlikely((gso_size + skb_transport_offset(skb) +
2847 tcp_hdrlen(skb)) >= skb->len)) {
2848 txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
2849 txqent->hdr.wi.lso_mss = 0;
2850 BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
2851 } else {
2852 txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND_LSO);
2853 txqent->hdr.wi.lso_mss = htons(gso_size);
2854 }
2856 if (bnad_tso_prepare(bnad, skb)) {
2857 BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare);
2858 return -EINVAL;
2859 }
2861 flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
2862 txqent->hdr.wi.l4_hdr_size_n_offset =
2863 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET(
2864 tcp_hdrlen(skb) >> 2, skb_transport_offset(skb)));
2865 } else {
2866 txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
2867 txqent->hdr.wi.lso_mss = 0;
2869 if (unlikely(skb->len > (bnad->netdev->mtu + ETH_HLEN))) {
2870 BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long);
2871 return -EINVAL;
2872 }
2874 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2875 u8 proto = 0;
2877 if (skb->protocol == htons(ETH_P_IP))
2878 proto = ip_hdr(skb)->protocol;
2879 #ifdef NETIF_F_IPV6_CSUM
2880 else if (skb->protocol == htons(ETH_P_IPV6)) {
2881 /* nexthdr may not be TCP immediately. */
2882 proto = ipv6_hdr(skb)->nexthdr;
2883 }
2884 #endif
2885 if (proto == IPPROTO_TCP) {
2886 flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
2887 txqent->hdr.wi.l4_hdr_size_n_offset =
2888 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2889 (0, skb_transport_offset(skb)));
2891 BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
2893 if (unlikely(skb_headlen(skb) <
2894 skb_transport_offset(skb) +
2895 tcp_hdrlen(skb))) {
2896 BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
2897 return -EINVAL;
2898 }
2899 } else if (proto == IPPROTO_UDP) {
2900 flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
2901 txqent->hdr.wi.l4_hdr_size_n_offset =
2902 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2903 (0, skb_transport_offset(skb)));
2905 BNAD_UPDATE_CTR(bnad, udpcsum_offload);
2906 if (unlikely(skb_headlen(skb) <
2907 skb_transport_offset(skb) +
2908 sizeof(struct udphdr))) {
2909 BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr);
2910 return -EINVAL;
2911 }
2912 } else {
2914 BNAD_UPDATE_CTR(bnad, tx_skb_csum_err);
2915 return -EINVAL;
2916 }
2917 } else
2918 txqent->hdr.wi.l4_hdr_size_n_offset = 0;
2919 }
2921 txqent->hdr.wi.flags = htons(flags);
2922 txqent->hdr.wi.frame_length = htonl(skb->len);
2924 return 0;
2925 }
2928 * bnad_start_xmit : Netdev entry point for Transmit
2929 * Called under lock held by net_device
2932 bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2933 {
2934 struct bnad *bnad = netdev_priv(netdev);
2935 u32 txq_id = 0;
2936 struct bna_tcb *tcb = NULL;
2937 struct bnad_tx_unmap *unmap_q, *unmap, *head_unmap;
2938 u32 prod, q_depth, vect_id;
2939 u32 wis, vectors, len;
2940 int i;
2941 dma_addr_t dma_addr;
2942 struct bna_txq_entry *txqent;
2944 len = skb_headlen(skb);
2946 /* Sanity checks for the skb */
2948 if (unlikely(skb->len <= ETH_HLEN)) {
2949 dev_kfree_skb_any(skb);
2950 BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
2951 return NETDEV_TX_OK;
2953 if (unlikely(len > BFI_TX_MAX_DATA_PER_VECTOR)) {
2954 dev_kfree_skb_any(skb);
2955 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2956 return NETDEV_TX_OK;
2958 if (unlikely(len == 0)) {
2959 dev_kfree_skb_any(skb);
2960 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2961 return NETDEV_TX_OK;
2964 tcb = bnad->tx_info[0].tcb[txq_id];
2967 * Takes care of the Tx that is scheduled between clearing the flag
2968 * and the netif_tx_stop_all_queues() call.
2970 if (unlikely(!tcb || !test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
2971 dev_kfree_skb_any(skb);
2972 BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
2973 return NETDEV_TX_OK;
2976 q_depth = tcb->q_depth;
2977 prod = tcb->producer_index;
2978 unmap_q = tcb->unmap_q;
2980 vectors = 1 + skb_shinfo(skb)->nr_frags;
2981 wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */
2983 if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
2984 dev_kfree_skb_any(skb);
2985 BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
2986 return NETDEV_TX_OK;
2989 /* Check for available TxQ resources */
2990 if (unlikely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
2991 if ((*tcb->hw_consumer_index != tcb->consumer_index) &&
2992 !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
2993 u32 sent;
2994 sent = bnad_txcmpl_process(bnad, tcb);
2995 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2996 bna_ib_ack(tcb->i_dbell, sent);
2997 smp_mb__before_clear_bit();
2998 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
2999 } else {
3000 netif_stop_queue(netdev);
3001 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
3002 }
3004 smp_mb();
3005 /*
3006 * Check again to deal with race condition between
3007 * netif_stop_queue here, and netif_wake_queue in
3008 * interrupt handler which is not inside netif tx lock.
3009 */
3010 if (likely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
3011 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
3012 return NETDEV_TX_BUSY;
3013 } else {
3014 netif_wake_queue(netdev);
3015 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
3016 }
3017 }
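/* There is room in the TxQ: build the work item at the current producer
 * index and DMA-map the skb header and fragments.
 */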
3019 txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
3020 head_unmap = &unmap_q[prod];
3022 /* Program the opcode, flags, frame_len, num_vectors in WI */
3023 if (bnad_txq_wi_prepare(bnad, tcb, skb, txqent)) {
3024 dev_kfree_skb_any(skb);
3025 return NETDEV_TX_OK;
3026 }
3027 txqent->hdr.wi.reserved = 0;
3028 txqent->hdr.wi.num_vectors = vectors;
3030 head_unmap->skb = skb;
3031 head_unmap->nvecs = 0;
3033 /* Program the vectors */
3034 unmap = head_unmap;
3035 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
3036 len, DMA_TO_DEVICE);
3037 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
3038 txqent->vector[0].length = htons(len);
3039 dma_unmap_addr_set(&unmap->vectors[0], dma_addr, dma_addr);
3040 head_unmap->nvecs++;
3042 for (i = 0, vect_id = 0; i < vectors - 1; i++) {
3043 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
3044 u32 size = skb_frag_size(frag);
3046 if (unlikely(size == 0)) {
3047 /* Undo the changes starting at tcb->producer_index */
3048 bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
3049 tcb->producer_index);
3050 dev_kfree_skb_any(skb);
3051 BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
3052 return NETDEV_TX_OK;
3053 }
3055 len += size;
3057 vect_id++;
3058 if (vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
3059 vect_id = 0;
3060 BNA_QE_INDX_INC(prod, q_depth);
3061 txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
3062 txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
3063 unmap = &unmap_q[prod];
3064 }
3066 dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
3067 0, size, DMA_TO_DEVICE);
3068 dma_unmap_len_set(&unmap->vectors[vect_id], dma_len, size);
3069 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
3070 txqent->vector[vect_id].length = htons(size);
3071 dma_unmap_addr_set(&unmap->vectors[vect_id], dma_addr,
3072 dma_addr);
3073 head_unmap->nvecs++;
3074 }
3076 if (unlikely(len != skb->len)) {
3077 /* Undo the changes starting at tcb->producer_index */
3078 bnad_tx_buff_unmap(bnad, unmap_q, q_depth, tcb->producer_index);
3079 dev_kfree_skb_any(skb);
3080 BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
3081 return NETDEV_TX_OK;
3082 }
3084 BNA_QE_INDX_INC(prod, q_depth);
3085 tcb->producer_index = prod;
3087 smp_mb();
3089 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
3090 return NETDEV_TX_OK;
3092 skb_tx_timestamp(skb);
3094 bna_txq_prod_indx_doorbell(tcb);
3095 smp_mb();
3097 return NETDEV_TX_OK;
3098 }
3100 /*
3101 * Uses the bna_lock to synchronize reads of the stats structures, which
3102 * are written by BNA under the same lock.
3103 */
3104 static struct rtnl_link_stats64 *
3105 bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
3106 {
3107 struct bnad *bnad = netdev_priv(netdev);
3108 unsigned long flags;
3110 spin_lock_irqsave(&bnad->bna_lock, flags);
3112 bnad_netdev_qstats_fill(bnad, stats);
3113 bnad_netdev_hwstats_fill(bnad, stats);
3115 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3117 return stats;
3118 }
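/* Sync the netdev unicast address list into the UCAM; if the list is too
 * large, allocation fails or the set operation fails, fall back to
 * DEFAULT Rx mode.
 */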
3120 static void
3121 bnad_set_rx_ucast_fltr(struct bnad *bnad)
3122 {
3123 struct net_device *netdev = bnad->netdev;
3124 int uc_count = netdev_uc_count(netdev);
3125 enum bna_cb_status ret;
3126 u8 *mac_list;
3127 struct netdev_hw_addr *ha;
3128 int entry;
3130 if (netdev_uc_empty(bnad->netdev)) {
3131 bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL, NULL);
3132 return;
3133 }
3135 if (uc_count > bna_attr(&bnad->bna)->num_ucmac)
3136 goto mode_default;
3138 mac_list = kzalloc(uc_count * ETH_ALEN, GFP_ATOMIC);
3139 if (mac_list == NULL)
3140 goto mode_default;
3142 entry = 0;
3143 netdev_for_each_uc_addr(ha, netdev) {
3144 memcpy(&mac_list[entry * ETH_ALEN],
3145 &ha->addr[0], ETH_ALEN);
3146 entry++;
3147 }
3149 ret = bna_rx_ucast_listset(bnad->rx_info[0].rx, entry,
3150 mac_list, NULL);
3151 kfree(mac_list);
3153 if (ret != BNA_CB_SUCCESS)
3154 goto mode_default;
3156 return;
3158 /* ucast packets not in UCAM are routed to default function */
3159 mode_default:
3160 bnad->cfg_flags |= BNAD_CF_DEFAULT;
3161 bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL, NULL);
3162 }
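/* Sync the netdev multicast list (prefixed with the broadcast address) into
 * the MCAM; fall back to ALLMULTI mode if it does not fit or the set fails.
 */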
3164 static void
3165 bnad_set_rx_mcast_fltr(struct bnad *bnad)
3166 {
3167 struct net_device *netdev = bnad->netdev;
3168 int mc_count = netdev_mc_count(netdev);
3169 enum bna_cb_status ret;
3170 u8 *mac_list;
3172 if (netdev->flags & IFF_ALLMULTI)
3173 goto mode_allmulti;
3175 if (netdev_mc_empty(netdev))
3176 return;
3178 if (mc_count > bna_attr(&bnad->bna)->num_mcmac)
3179 goto mode_allmulti;
3181 mac_list = kzalloc((mc_count + 1) * ETH_ALEN, GFP_ATOMIC);
3183 if (mac_list == NULL)
3184 goto mode_allmulti;
3186 memcpy(&mac_list[0], &bnad_bcast_addr[0], ETH_ALEN);
3188 /* copy rest of the MCAST addresses */
3189 bnad_netdev_mc_list_get(netdev, mac_list);
3190 ret = bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
3191 mac_list, NULL);
3192 kfree(mac_list);
3194 if (ret != BNA_CB_SUCCESS)
3195 goto mode_allmulti;
3197 return;
3199 mode_allmulti:
3200 bnad->cfg_flags |= BNAD_CF_ALLMULTI;
3201 bna_rx_mcast_delall(bnad->rx_info[0].rx, NULL);
3202 }
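/* ndo_set_rx_mode: recompute the promiscuous / allmulti / default state
 * under bna_lock and program the new Rx mode into the hardware.
 */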
3204 void
3205 bnad_set_rx_mode(struct net_device *netdev)
3206 {
3207 struct bnad *bnad = netdev_priv(netdev);
3208 enum bna_rxmode new_mode, mode_mask;
3209 unsigned long flags;
3211 spin_lock_irqsave(&bnad->bna_lock, flags);
3213 if (bnad->rx_info[0].rx == NULL) {
3214 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3215 return;
3216 }
3218 /* clear bnad flags to update it with new settings */
3219 bnad->cfg_flags &= ~(BNAD_CF_PROMISC | BNAD_CF_DEFAULT |
3220 BNAD_CF_ALLMULTI);
3222 new_mode = 0;
3223 if (netdev->flags & IFF_PROMISC) {
3224 new_mode |= BNAD_RXMODE_PROMISC_DEFAULT;
3225 bnad->cfg_flags |= BNAD_CF_PROMISC;
3226 } else {
3227 bnad_set_rx_mcast_fltr(bnad);
3229 if (bnad->cfg_flags & BNAD_CF_ALLMULTI)
3230 new_mode |= BNA_RXMODE_ALLMULTI;
3232 bnad_set_rx_ucast_fltr(bnad);
3234 if (bnad->cfg_flags & BNAD_CF_DEFAULT)
3235 new_mode |= BNA_RXMODE_DEFAULT;
3236 }
3238 mode_mask = BNA_RXMODE_PROMISC | BNA_RXMODE_DEFAULT |
3239 BNA_RXMODE_ALLMULTI;
3240 bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask, NULL);
3242 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3243 }
3245 /*
3246 * bna_lock is used to sync writes to netdev->addr
3247 * conf_lock cannot be used since this call may be made
3248 * in a non-blocking context.
3249 */
3250 static int
3251 bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
3252 {
3253 int err;
3254 struct bnad *bnad = netdev_priv(netdev);
3255 struct sockaddr *sa = (struct sockaddr *)mac_addr;
3256 unsigned long flags;
3258 spin_lock_irqsave(&bnad->bna_lock, flags);
3260 err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
3262 if (!err)
3263 memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);
3265 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3267 return err;
3268 }
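/* Program the new enet frame size and wait for the firmware completion
 * posted by bnad_cb_enet_mtu_set().
 */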
3271 bnad_mtu_set(struct bnad *bnad, int frame_size)
3273 unsigned long flags;
3275 init_completion(&bnad->bnad_completions.mtu_comp);
3277 spin_lock_irqsave(&bnad->bna_lock, flags);
3278 bna_enet_mtu_set(&bnad->bna.enet, frame_size, bnad_cb_enet_mtu_set);
3279 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3281 wait_for_completion(&bnad->bnad_completions.mtu_comp);
3283 return bnad->bnad_completions.mtu_comp_status;
3286 static int
3287 bnad_change_mtu(struct net_device *netdev, int new_mtu)
3288 {
3289 int err, mtu;
3290 struct bnad *bnad = netdev_priv(netdev);
3291 u32 rx_count = 0, frame, new_frame;
3293 if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
3294 return -EINVAL;
3296 mutex_lock(&bnad->conf_mutex);
3298 mtu = netdev->mtu;
3299 netdev->mtu = new_mtu;
3301 frame = BNAD_FRAME_SIZE(mtu);
3302 new_frame = BNAD_FRAME_SIZE(new_mtu);
3304 /* check if multi-buffer needs to be enabled */
3305 if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
3306 netif_running(bnad->netdev)) {
3307 /* only when transition is over 4K */
3308 if ((frame <= 4096 && new_frame > 4096) ||
3309 (frame > 4096 && new_frame <= 4096))
3310 rx_count = bnad_reinit_rx(bnad);
3311 }
3313 /* rx_count > 0 - new rx created
3314 * - Linux set err = 0 and return
3315 */
3316 err = bnad_mtu_set(bnad, new_frame);
3317 if (err)
3318 err = -EBUSY;
3320 mutex_unlock(&bnad->conf_mutex);
3321 return err;
3322 }
3324 static int
3325 bnad_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
3326 {
3327 struct bnad *bnad = netdev_priv(netdev);
3328 unsigned long flags;
3330 if (!bnad->rx_info[0].rx)
3331 return 0;
3333 mutex_lock(&bnad->conf_mutex);
3335 spin_lock_irqsave(&bnad->bna_lock, flags);
3336 bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
3337 set_bit(vid, bnad->active_vlans);
3338 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3340 mutex_unlock(&bnad->conf_mutex);
3342 return 0;
3343 }
3345 static int
3346 bnad_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
3347 {
3348 struct bnad *bnad = netdev_priv(netdev);
3349 unsigned long flags;
3351 if (!bnad->rx_info[0].rx)
3352 return 0;
3354 mutex_lock(&bnad->conf_mutex);
3356 spin_lock_irqsave(&bnad->bna_lock, flags);
3357 clear_bit(vid, bnad->active_vlans);
3358 bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
3359 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3361 mutex_unlock(&bnad->conf_mutex);
3363 return 0;
3364 }
3366 static int bnad_set_features(struct net_device *dev, netdev_features_t features)
3367 {
3368 struct bnad *bnad = netdev_priv(dev);
3369 netdev_features_t changed = features ^ dev->features;
3371 if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(dev)) {
3372 unsigned long flags;
3374 spin_lock_irqsave(&bnad->bna_lock, flags);
3376 if (features & NETIF_F_HW_VLAN_CTAG_RX)
3377 bna_rx_vlan_strip_enable(bnad->rx_info[0].rx);
3378 else
3379 bna_rx_vlan_strip_disable(bnad->rx_info[0].rx);
3381 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3382 }
3384 return 0;
3385 }
3387 #ifdef CONFIG_NET_POLL_CONTROLLER
3388 static void
3389 bnad_netpoll(struct net_device *netdev)
3390 {
3391 struct bnad *bnad = netdev_priv(netdev);
3392 struct bnad_rx_info *rx_info;
3393 struct bnad_rx_ctrl *rx_ctrl;
3394 u32 curr_mask;
3395 int i, j;
3397 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
3398 bna_intx_disable(&bnad->bna, curr_mask);
3399 bnad_isr(bnad->pcidev->irq, netdev);
3400 bna_intx_enable(&bnad->bna, curr_mask);
3401 } else {
3402 /*
3403 * Tx processing may happen in sending context, so no need
3404 * to explicitly process completions here
3405 */
3407 /* Rx processing */
3408 for (i = 0; i < bnad->num_rx; i++) {
3409 rx_info = &bnad->rx_info[i];
3410 if (!rx_info->rx)
3411 continue;
3412 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
3413 rx_ctrl = &rx_info->rx_ctrl[j];
3414 if (rx_ctrl->ccb)
3415 bnad_netif_rx_schedule_poll(bnad,
3416 rx_ctrl->ccb);
3417 }
3418 }
3419 }
3420 }
3421 #endif
3423 static const struct net_device_ops bnad_netdev_ops = {
3424 .ndo_open = bnad_open,
3425 .ndo_stop = bnad_stop,
3426 .ndo_start_xmit = bnad_start_xmit,
3427 .ndo_get_stats64 = bnad_get_stats64,
3428 .ndo_set_rx_mode = bnad_set_rx_mode,
3429 .ndo_validate_addr = eth_validate_addr,
3430 .ndo_set_mac_address = bnad_set_mac_address,
3431 .ndo_change_mtu = bnad_change_mtu,
3432 .ndo_vlan_rx_add_vid = bnad_vlan_rx_add_vid,
3433 .ndo_vlan_rx_kill_vid = bnad_vlan_rx_kill_vid,
3434 .ndo_set_features = bnad_set_features,
3435 #ifdef CONFIG_NET_POLL_CONTROLLER
3436 .ndo_poll_controller = bnad_netpoll
3437 #endif
3438 };
3441 bnad_netdev_init(struct bnad *bnad, bool using_dac)
3443 struct net_device *netdev = bnad->netdev;
3445 netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
3446 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3447 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX |
3448 NETIF_F_HW_VLAN_CTAG_RX;
3450 netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
3451 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3452 NETIF_F_TSO | NETIF_F_TSO6;
3454 netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
3456 if (using_dac)
3457 netdev->features |= NETIF_F_HIGHDMA;
3459 netdev->mem_start = bnad->mmio_start;
3460 netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
3462 netdev->netdev_ops = &bnad_netdev_ops;
3463 bnad_set_ethtool_ops(netdev);
3467 * 1. Initialize the bnad structure
3468 * 2. Setup netdev pointer in pci_dev
3469 * 3. Initialize no. of TxQ & CQs & MSIX vectors
3470 * 4. Initialize work queue.
3473 bnad_init(struct bnad *bnad,
3474 struct pci_dev *pdev, struct net_device *netdev)
3476 unsigned long flags;
3478 SET_NETDEV_DEV(netdev, &pdev->dev);
3479 pci_set_drvdata(pdev, netdev);
3481 bnad->netdev = netdev;
3482 bnad->pcidev = pdev;
3483 bnad->mmio_start = pci_resource_start(pdev, 0);
3484 bnad->mmio_len = pci_resource_len(pdev, 0);
3485 bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
3486 if (!bnad->bar0) {
3487 dev_err(&pdev->dev, "ioremap for bar0 failed\n");
3488 return -ENOMEM;
3489 }
3490 pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
3491 (unsigned long long) bnad->mmio_len);
3493 spin_lock_irqsave(&bnad->bna_lock, flags);
3494 if (!bnad_msix_disable)
3495 bnad->cfg_flags = BNAD_CF_MSIX;
3497 bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
3499 bnad_q_num_init(bnad);
3500 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3502 bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
3503 (bnad->num_rx * bnad->num_rxp_per_rx) +
3504 BNAD_MAILBOX_MSIX_VECTORS;
3506 bnad->txq_depth = BNAD_TXQ_DEPTH;
3507 bnad->rxq_depth = BNAD_RXQ_DEPTH;
3509 bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
3510 bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
3512 sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id);
3513 bnad->work_q = create_singlethread_workqueue(bnad->wq_name);
3514 if (!bnad->work_q) {
3515 iounmap(bnad->bar0);
3516 return -ENOMEM;
3517 }
3519 return 0;
3520 }
3522 /*
3523 * Must be called after bnad_pci_uninit()
3524 * so that iounmap() and pci_set_drvdata(NULL)
3525 * happen only after PCI uninitialization.
3526 */
3527 static void
3528 bnad_uninit(struct bnad *bnad)
3529 {
3530 if (bnad->work_q) {
3531 flush_workqueue(bnad->work_q);
3532 destroy_workqueue(bnad->work_q);
3533 bnad->work_q = NULL;
3534 }
3536 if (bnad->bar0)
3537 iounmap(bnad->bar0);
3538 }
3542 a) Per-ioceth mutex used for serializing configuration
3543 changes from the OS interface
3544 b) spin lock used to protect the bna state machine
3547 bnad_lock_init(struct bnad *bnad)
3549 spin_lock_init(&bnad->bna_lock);
3550 mutex_init(&bnad->conf_mutex);
3551 mutex_init(&bnad_list_mutex);
3555 bnad_lock_uninit(struct bnad *bnad)
3557 mutex_destroy(&bnad->conf_mutex);
3558 mutex_destroy(&bnad_list_mutex);
3561 /* PCI Initialization */
3562 static int
3563 bnad_pci_init(struct bnad *bnad,
3564 struct pci_dev *pdev, bool *using_dac)
3565 {
3566 int err;
3568 err = pci_enable_device(pdev);
3569 if (err)
3570 return err;
3571 err = pci_request_regions(pdev, BNAD_NAME);
3572 if (err)
3573 goto disable_device;
3574 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
3575 *using_dac = true;
3576 } else {
3577 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3578 if (err)
3579 goto release_regions;
3580 *using_dac = false;
3581 }
3582 pci_set_master(pdev);
3583 return 0;
3585 release_regions:
3586 pci_release_regions(pdev);
3587 disable_device:
3588 pci_disable_device(pdev);
3590 return err;
3591 }
3593 static void
3594 bnad_pci_uninit(struct pci_dev *pdev)
3595 {
3596 pci_release_regions(pdev);
3597 pci_disable_device(pdev);
3598 }
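/* PCI probe entry point: allocates the netdev/bnad, maps BAR0, brings up
 * the IOC, allocates BNA resources and finally registers the net_device.
 */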
3600 static int
3601 bnad_pci_probe(struct pci_dev *pdev,
3602 const struct pci_device_id *pcidev_id)
3603 {
3604 bool using_dac;
3605 int err;
3606 struct bnad *bnad;
3607 struct bna *bna;
3608 struct net_device *netdev;
3609 struct bfa_pcidev pcidev_info;
3610 unsigned long flags;
3612 pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
3613 pdev, pcidev_id, PCI_FUNC(pdev->devfn));
3615 mutex_lock(&bnad_fwimg_mutex);
3616 if (!cna_get_firmware_buf(pdev)) {
3617 mutex_unlock(&bnad_fwimg_mutex);
3618 pr_warn("Failed to load Firmware Image!\n");
3621 mutex_unlock(&bnad_fwimg_mutex);
3623 /*
3624 * Allocates sizeof(struct net_device + struct bnad)
3625 * bnad = netdev->priv
3626 */
3627 netdev = alloc_etherdev(sizeof(struct bnad));
3628 if (!netdev) {
3629 err = -ENOMEM;
3630 return err;
3631 }
3632 bnad = netdev_priv(netdev);
3633 bnad_lock_init(bnad);
3634 bnad_add_to_list(bnad);
3636 mutex_lock(&bnad->conf_mutex);
3637 /*
3638 * PCI initialization
3639 * Output : using_dac = 1 for 64 bit DMA
3640 * = 0 for 32 bit DMA
3641 */
3642 using_dac = false;
3643 err = bnad_pci_init(bnad, pdev, &using_dac);
3644 if (err)
3645 goto unlock_mutex;
3647 /*
3648 * Initialize bnad structure
3649 * Setup relation between pci_dev & netdev
3650 */
3651 err = bnad_init(bnad, pdev, netdev);
3652 if (err)
3653 goto pci_uninit;
3655 /* Initialize netdev structure, set up ethtool ops */
3656 bnad_netdev_init(bnad, using_dac);
3658 /* Set link to down state */
3659 netif_carrier_off(netdev);
3661 /* Setup the debugfs node for this bfad */
3662 if (bna_debugfs_enable)
3663 bnad_debugfs_init(bnad);
3665 /* Get resource requirement form bna */
3666 spin_lock_irqsave(&bnad->bna_lock, flags);
3667 bna_res_req(&bnad->res_info[0]);
3668 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3670 /* Allocate resources from bna */
3671 err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3672 if (err)
3673 goto drv_uninit;
3675 bna = &bnad->bna;
3677 /* Setup pcidev_info for bna_init() */
3678 pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
3679 pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
3680 pcidev_info.device_id = bnad->pcidev->device;
3681 pcidev_info.pci_bar_kva = bnad->bar0;
3683 spin_lock_irqsave(&bnad->bna_lock, flags);
3684 bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
3685 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3687 bnad->stats.bna_stats = &bna->stats;
3689 bnad_enable_msix(bnad);
3690 err = bnad_mbox_irq_alloc(bnad);
3691 if (err)
3692 goto res_free;
3694 /* Set up timers */
3695 setup_timer(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout,
3696 ((unsigned long)bnad));
3697 setup_timer(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check,
3698 ((unsigned long)bnad));
3699 setup_timer(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout,
3700 ((unsigned long)bnad));
3701 setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
3702 ((unsigned long)bnad));
3704 /* Now start the timer before calling IOC */
3705 mod_timer(&bnad->bna.ioceth.ioc.iocpf_timer,
3706 jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
3708 /*
3709 * Start the chip
3710 * If the call back comes with error, we bail out.
3711 * This is a catastrophic error.
3712 */
3713 err = bnad_ioceth_enable(bnad);
3714 if (err) {
3715 pr_err("BNA: Initialization failed err=%d\n",
3716 err);
3717 goto probe_success;
3718 }
3720 spin_lock_irqsave(&bnad->bna_lock, flags);
3721 if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3722 bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) {
3723 bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1,
3724 bna_attr(bna)->num_rxp - 1);
3725 if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3726 bna_num_rxp_set(bna, BNAD_NUM_RXP + 1))
3727 err = -EIO;
3728 }
3729 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3730 if (err)
3731 goto disable_ioceth;
3733 spin_lock_irqsave(&bnad->bna_lock, flags);
3734 bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]);
3735 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3737 err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3739 if (err)
3740 goto disable_ioceth;
3743 spin_lock_irqsave(&bnad->bna_lock, flags);
3744 bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]);
3745 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3747 /* Get the burnt-in mac */
3748 spin_lock_irqsave(&bnad->bna_lock, flags);
3749 bna_enet_perm_mac_get(&bna->enet, &bnad->perm_addr);
3750 bnad_set_netdev_perm_addr(bnad);
3751 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3753 mutex_unlock(&bnad->conf_mutex);
3755 /* Finally, register with net_device layer */
3756 err = register_netdev(netdev);
3757 if (err) {
3758 pr_err("BNA : Registering with netdev failed\n");
3759 goto probe_uninit;
3760 }
3761 set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);
3763 return 0;
3765 probe_success:
3766 mutex_unlock(&bnad->conf_mutex);
3767 return 0;
3769 probe_uninit:
3770 mutex_lock(&bnad->conf_mutex);
3771 bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3772 disable_ioceth:
3773 bnad_ioceth_disable(bnad);
3774 del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3775 del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3776 del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3777 spin_lock_irqsave(&bnad->bna_lock, flags);
3778 bna_uninit(bna);
3779 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3780 bnad_mbox_irq_free(bnad);
3781 bnad_disable_msix(bnad);
3782 res_free:
3783 bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3784 drv_uninit:
3785 /* Remove the debugfs node for this bnad */
3786 kfree(bnad->regdata);
3787 bnad_debugfs_uninit(bnad);
3788 bnad_uninit(bnad);
3789 pci_uninit:
3790 bnad_pci_uninit(pdev);
3791 unlock_mutex:
3792 mutex_unlock(&bnad->conf_mutex);
3793 bnad_remove_from_list(bnad);
3794 bnad_lock_uninit(bnad);
3795 free_netdev(netdev);
3796 return err;
3797 }
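/* PCI remove: unregister the netdev, disable the IOC, free BNA resources
 * and undo PCI setup, releasing everything acquired in probe.
 */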
3799 static void
3800 bnad_pci_remove(struct pci_dev *pdev)
3801 {
3802 struct net_device *netdev = pci_get_drvdata(pdev);
3803 struct bnad *bnad;
3804 struct bna *bna;
3805 unsigned long flags;
3807 if (!netdev)
3808 return;
3810 pr_info("%s bnad_pci_remove\n", netdev->name);
3811 bnad = netdev_priv(netdev);
3812 bna = &bnad->bna;
3814 if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags))
3815 unregister_netdev(netdev);
3817 mutex_lock(&bnad->conf_mutex);
3818 bnad_ioceth_disable(bnad);
3819 del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3820 del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3821 del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3822 spin_lock_irqsave(&bnad->bna_lock, flags);
3823 bna_uninit(bna);
3824 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3826 bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3827 bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3828 bnad_mbox_irq_free(bnad);
3829 bnad_disable_msix(bnad);
3830 bnad_pci_uninit(pdev);
3831 mutex_unlock(&bnad->conf_mutex);
3832 bnad_remove_from_list(bnad);
3833 bnad_lock_uninit(bnad);
3834 /* Remove the debugfs node for this bnad */
3835 kfree(bnad->regdata);
3836 bnad_debugfs_uninit(bnad);
3838 free_netdev(netdev);
3839 }
3841 static DEFINE_PCI_DEVICE_TABLE(bnad_pci_id_table) = {
3842 {
3843 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3844 PCI_DEVICE_ID_BROCADE_CT),
3845 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3846 .class_mask = 0xffff00
3847 },
3848 {
3849 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3850 BFA_PCI_DEVICE_ID_CT2),
3851 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3852 .class_mask = 0xffff00
3853 },
3854 {0, },
3855 };
3857 MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
3859 static struct pci_driver bnad_pci_driver = {
3860 .name = BNAD_NAME,
3861 .id_table = bnad_pci_id_table,
3862 .probe = bnad_pci_probe,
3863 .remove = bnad_pci_remove,
3864 };
3866 static int __init
3867 bnad_module_init(void)
3868 {
3869 int err;
3871 pr_info("Brocade 10G Ethernet driver - version: %s\n",
3872 BNAD_VERSION);
3874 bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
3876 err = pci_register_driver(&bnad_pci_driver);
3877 if (err < 0) {
3878 pr_err("bna : PCI registration failed in module init "
3879 "(%d)\n", err);
3880 return err;
3881 }
3883 return 0;
3884 }
3886 static void __exit
3887 bnad_module_exit(void)
3888 {
3889 pci_unregister_driver(&bnad_pci_driver);
3890 release_firmware(bfi_fw);
3891 }
3893 module_init(bnad_module_init);
3894 module_exit(bnad_module_exit);
3896 MODULE_AUTHOR("Brocade");
3897 MODULE_LICENSE("GPL");
3898 MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
3899 MODULE_VERSION(BNAD_VERSION);
3900 MODULE_FIRMWARE(CNA_FW_FILE_CT);
3901 MODULE_FIRMWARE(CNA_FW_FILE_CT2);