2 * Linux network driver for Brocade Converged Network Adapter.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
18 #include <linux/netdevice.h>
19 #include <linux/skbuff.h>
20 #include <linux/etherdevice.h>
22 #include <linux/ethtool.h>
23 #include <linux/if_vlan.h>
24 #include <linux/if_ether.h>
31 DEFINE_MUTEX(bnad_fwimg_mutex);
36 static uint bnad_msix_disable;
37 module_param(bnad_msix_disable, uint, 0444);
38 MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");
40 static uint bnad_ioc_auto_recover = 1;
41 module_param(bnad_ioc_auto_recover, uint, 0444);
42 MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");
47 u32 bnad_rxqs_per_cq = 2;
49 const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
54 #define BNAD_TX_UNMAPQ_DEPTH (bnad->txq_depth * 2)
56 #define BNAD_RX_UNMAPQ_DEPTH (bnad->rxq_depth)
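/*
 * Mailbox IRQ: the last MSI-X vector in MSI-X mode,
 * otherwise the device's legacy INTx line.
 */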
58 #define BNAD_GET_MBOX_IRQ(_bnad) \
59 (((_bnad)->cfg_flags & BNAD_CF_MSIX) ? \
60 ((_bnad)->msix_table[(_bnad)->msix_num - 1].vector) : \
61 ((_bnad)->pcidev->irq))
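/*
 * Fills a bna_res_info entry with the kernel-virtual memory requirement
 * for an unmap queue: a bnad_unmap_q header followed by (_depth - 1)
 * additional bnad_skb_unmap slots (the struct already carries one).
 */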
63 #define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _depth) \
65 (_res_info)->res_type = BNA_RES_T_MEM; \
66 (_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA; \
67 (_res_info)->res_u.mem_info.num = (_num); \
68 (_res_info)->res_u.mem_info.len = \
69 sizeof(struct bnad_unmap_q) + \
70 (sizeof(struct bnad_skb_unmap) * ((_depth) - 1)); \
74 * Reinitialize completions in CQ, once Rx is taken down
77 bnad_cq_cmpl_init(struct bnad *bnad, struct bna_ccb *ccb)
79 struct bna_cq_entry *cmpl, *next_cmpl;
80 unsigned int wi_range, wis = 0, ccb_prod = 0;
83 BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt, cmpl,
86 for (i = 0; i < ccb->q_depth; i++) {
88 if (likely(--wi_range))
91 BNA_QE_INDX_ADD(ccb_prod, wis, ccb->q_depth);
93 BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt,
102 * Frees all pending Tx Bufs
103 * At this point no activity is expected on the Q,
104 * so DMA unmap & freeing is fine.
107 bnad_free_all_txbufs(struct bnad *bnad,
111 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
112 struct bnad_skb_unmap *unmap_array;
113 struct sk_buff *skb = NULL;
116 unmap_array = unmap_q->unmap_array;
119 while (unmap_cons < unmap_q->q_depth) {
120 skb = unmap_array[unmap_cons].skb;
125 unmap_array[unmap_cons].skb = NULL;
127 pci_unmap_single(bnad->pcidev,
128 pci_unmap_addr(&unmap_array[unmap_cons],
129 dma_addr), skb_headlen(skb),
132 pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
134 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
135 pci_unmap_page(bnad->pcidev,
136 pci_unmap_addr(&unmap_array[unmap_cons],
138 skb_shinfo(skb)->frags[i].size,
140 pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
144 dev_kfree_skb_any(skb);
148 /* Data Path Handlers */
151 * bnad_free_txbufs : Frees the Tx bufs on Tx completion
152 * Can be called in interrupt, sending, or tasklet context
157 bnad_free_txbufs(struct bnad *bnad,
160 u32 sent_packets = 0, sent_bytes = 0;
161 u16 wis, unmap_cons, updated_hw_cons;
162 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
163 struct bnad_skb_unmap *unmap_array;
168 * Just return if Tx is stopped. This check is useful
169 * when bnad_free_txbufs() runs from a tasklet that was scheduled
170 * before bnad_cb_tx_cleanup() cleared the BNAD_RF_TX_STARTED bit,
171 * but actually runs after the cleanup has completed.
174 if (!test_bit(BNAD_RF_TX_STARTED, &bnad->run_flags))
177 updated_hw_cons = *(tcb->hw_consumer_index);
179 wis = BNA_Q_INDEX_CHANGE(tcb->consumer_index,
180 updated_hw_cons, tcb->q_depth);
182 BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));
184 unmap_array = unmap_q->unmap_array;
185 unmap_cons = unmap_q->consumer_index;
187 prefetch(&unmap_array[unmap_cons + 1]);
189 skb = unmap_array[unmap_cons].skb;
191 unmap_array[unmap_cons].skb = NULL;
194 sent_bytes += skb->len;
195 wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);
197 pci_unmap_single(bnad->pcidev,
198 pci_unmap_addr(&unmap_array[unmap_cons],
199 dma_addr), skb_headlen(skb),
201 pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
202 BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
204 prefetch(&unmap_array[unmap_cons + 1]);
205 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
206 prefetch(&unmap_array[unmap_cons + 1]);
208 pci_unmap_page(bnad->pcidev,
209 pci_unmap_addr(&unmap_array[unmap_cons],
211 skb_shinfo(skb)->frags[i].size,
213 pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
215 BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
217 dev_kfree_skb_any(skb);
220 /* Update consumer pointers. */
221 tcb->consumer_index = updated_hw_cons;
222 unmap_q->consumer_index = unmap_cons;
224 tcb->txq->tx_packets += sent_packets;
225 tcb->txq->tx_bytes += sent_bytes;
230 /* Tx Free Tasklet function */
231 /* Frees Tx buffers for all the tcbs in all the Tx objects */
233 * Scheduled from sending context, so that
234 * the fat Tx lock is not held for too long
235 * in the sending context.
238 bnad_tx_free_tasklet(unsigned long bnad_ptr)
240 struct bnad *bnad = (struct bnad *)bnad_ptr;
245 for (i = 0; i < bnad->num_tx; i++) {
246 for (j = 0; j < bnad->num_txq_per_tx; j++) {
247 tcb = bnad->tx_info[i].tcb[j];
250 if (((u16) (*tcb->hw_consumer_index) !=
251 tcb->consumer_index) &&
252 (!test_and_set_bit(BNAD_TXQ_FREE_SENT,
254 acked = bnad_free_txbufs(bnad, tcb);
255 bna_ib_ack(tcb->i_dbell, acked);
256 smp_mb__before_clear_bit();
257 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
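/*
 * Reclaims completed Tx buffers for one tcb, acks the IB and
 * wakes the netdev queue when enough descriptors are free again.
 */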
264 bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
266 struct net_device *netdev = bnad->netdev;
269 if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
272 sent = bnad_free_txbufs(bnad, tcb);
274 if (netif_queue_stopped(netdev) &&
275 netif_carrier_ok(netdev) &&
276 BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
277 BNAD_NETIF_WAKE_THRESHOLD) {
278 netif_wake_queue(netdev);
279 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
281 bna_ib_ack(tcb->i_dbell, sent);
283 bna_ib_ack(tcb->i_dbell, 0);
285 smp_mb__before_clear_bit();
286 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
291 /* MSIX Tx Completion Handler */
293 bnad_msix_tx(int irq, void *data)
295 struct bna_tcb *tcb = (struct bna_tcb *)data;
296 struct bnad *bnad = tcb->bnad;
304 bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb)
306 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
308 rcb->producer_index = 0;
309 rcb->consumer_index = 0;
311 unmap_q->producer_index = 0;
312 unmap_q->consumer_index = 0;
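/* Unmaps and frees all Rx buffers still held in the RCB's unmap queue */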
316 bnad_free_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
318 struct bnad_unmap_q *unmap_q;
321 unmap_q = rcb->unmap_q;
322 while (BNA_QE_IN_USE_CNT(unmap_q, unmap_q->q_depth)) {
323 skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
325 unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
326 pci_unmap_single(bnad->pcidev, pci_unmap_addr(&unmap_q->
327 unmap_array[unmap_q->consumer_index],
328 dma_addr), rcb->rxq->buffer_size +
329 NET_IP_ALIGN, PCI_DMA_FROMDEVICE);
331 BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
332 BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);
335 bnad_reset_rcb(bnad, rcb);
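/*
 * Allocates skbs for the free slots in the RxQ, maps them for DMA,
 * writes the descriptors and rings the producer doorbell.
 */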
339 bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
341 u16 to_alloc, alloced, unmap_prod, wi_range;
342 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
343 struct bnad_skb_unmap *unmap_array;
344 struct bna_rxq_entry *rxent;
350 BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth);
352 unmap_array = unmap_q->unmap_array;
353 unmap_prod = unmap_q->producer_index;
355 BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent, wi_range);
359 BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent,
362 skb = alloc_skb(rcb->rxq->buffer_size + NET_IP_ALIGN,
364 if (unlikely(!skb)) {
365 BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
368 skb->dev = bnad->netdev;
369 skb_reserve(skb, NET_IP_ALIGN);
370 unmap_array[unmap_prod].skb = skb;
371 dma_addr = pci_map_single(bnad->pcidev, skb->data,
372 rcb->rxq->buffer_size, PCI_DMA_FROMDEVICE);
373 pci_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
375 BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
376 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
384 if (likely(alloced)) {
385 unmap_q->producer_index = unmap_prod;
386 rcb->producer_index = unmap_prod;
388 bna_rxq_prod_indx_doorbell(rcb);
393 * Locking is required in the enable path
394 * because it is called from NAPI poll
395 * context, where bna_lock is not held,
396 * unlike in the IRQ context.
399 bnad_enable_txrx_irqs(struct bnad *bnad)
406 spin_lock_irqsave(&bnad->bna_lock, flags);
407 for (i = 0; i < bnad->num_tx; i++) {
408 for (j = 0; j < bnad->num_txq_per_tx; j++) {
409 tcb = bnad->tx_info[i].tcb[j];
410 bna_ib_coalescing_timer_set(tcb->i_dbell,
411 tcb->txq->ib->ib_config.coalescing_timeo);
412 bna_ib_ack(tcb->i_dbell, 0);
416 for (i = 0; i < bnad->num_rx; i++) {
417 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
418 ccb = bnad->rx_info[i].rx_ctrl[j].ccb;
419 bnad_enable_rx_irq_unsafe(ccb);
422 spin_unlock_irqrestore(&bnad->bna_lock, flags);
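/*
 * Refills the RxQ if the number of free unmap slots has crossed the
 * refill threshold; the RXQ_REFILL bit guards against re-entry.
 */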
426 bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb)
428 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
430 if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
431 if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
432 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
433 bnad_alloc_n_post_rxbufs(bnad, rcb);
434 smp_mb__before_clear_bit();
435 clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
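/*
 * Rx completion processing (NAPI context): walks the CQ, unmaps and
 * hands completed frames to the stack, then acks the IB and refills.
 */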
440 bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
442 struct bna_cq_entry *cmpl, *next_cmpl;
443 struct bna_rcb *rcb = NULL;
444 unsigned int wi_range, packets = 0, wis = 0;
445 struct bnad_unmap_q *unmap_q;
448 u32 qid0 = ccb->rcb[0]->rxq->rxq_id;
449 struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
451 prefetch(bnad->netdev);
452 BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
454 BUG_ON(!(wi_range <= ccb->q_depth));
455 while (cmpl->valid && packets < budget) {
457 BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
459 if (qid0 == cmpl->rxq_id)
464 unmap_q = rcb->unmap_q;
466 skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
468 unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
469 pci_unmap_single(bnad->pcidev,
470 pci_unmap_addr(&unmap_q->
471 unmap_array[unmap_q->
474 rcb->rxq->buffer_size,
476 BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
478 /* Should be more efficient ? Performance ? */
479 BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);
482 if (likely(--wi_range))
483 next_cmpl = cmpl + 1;
485 BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
487 BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt,
488 next_cmpl, wi_range);
489 BUG_ON(!(wi_range <= ccb->q_depth));
493 flags = ntohl(cmpl->flags);
496 (BNA_CQ_EF_MAC_ERROR | BNA_CQ_EF_FCS_ERROR |
497 BNA_CQ_EF_TOO_LONG))) {
498 dev_kfree_skb_any(skb);
499 rcb->rxq->rx_packets_with_error++;
503 skb_put(skb, ntohs(cmpl->length));
506 (((flags & BNA_CQ_EF_IPV4) &&
507 (flags & BNA_CQ_EF_L3_CKSUM_OK)) ||
508 (flags & BNA_CQ_EF_IPV6)) &&
509 (flags & (BNA_CQ_EF_TCP | BNA_CQ_EF_UDP)) &&
510 (flags & BNA_CQ_EF_L4_CKSUM_OK)))
511 skb->ip_summed = CHECKSUM_UNNECESSARY;
513 skb_checksum_none_assert(skb);
515 rcb->rxq->rx_packets++;
516 rcb->rxq->rx_bytes += skb->len;
517 skb->protocol = eth_type_trans(skb, bnad->netdev);
519 if (bnad->vlan_grp && (flags & BNA_CQ_EF_VLAN)) {
520 struct bnad_rx_ctrl *rx_ctrl =
521 (struct bnad_rx_ctrl *)ccb->ctrl;
522 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
523 vlan_gro_receive(&rx_ctrl->napi, bnad->vlan_grp,
524 ntohs(cmpl->vlan_tag), skb);
526 vlan_hwaccel_receive_skb(skb,
528 ntohs(cmpl->vlan_tag));
530 } else { /* Not VLAN tagged/stripped */
531 struct bnad_rx_ctrl *rx_ctrl =
532 (struct bnad_rx_ctrl *)ccb->ctrl;
533 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
534 napi_gro_receive(&rx_ctrl->napi, skb);
536 netif_receive_skb(skb);
544 BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
547 bna_ib_ack(ccb->i_dbell, packets);
548 bnad_refill_rxq(bnad, ccb->rcb[0]);
550 bnad_refill_rxq(bnad, ccb->rcb[1]);
552 bna_ib_ack(ccb->i_dbell, 0);
558 bnad_disable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
560 bna_ib_coalescing_timer_set(ccb->i_dbell, 0);
561 bna_ib_ack(ccb->i_dbell, 0);
565 bnad_enable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
567 spin_lock_irq(&bnad->bna_lock); /* Because of polling context */
568 bnad_enable_rx_irq_unsafe(ccb);
569 spin_unlock_irq(&bnad->bna_lock);
573 bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
575 struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
576 if (likely(napi_schedule_prep((&rx_ctrl->napi)))) {
577 bnad_disable_rx_irq(bnad, ccb);
578 __napi_schedule((&rx_ctrl->napi));
580 BNAD_UPDATE_CTR(bnad, netif_rx_schedule);
583 /* MSIX Rx Path Handler */
585 bnad_msix_rx(int irq, void *data)
587 struct bna_ccb *ccb = (struct bna_ccb *)data;
588 struct bnad *bnad = ccb->bnad;
590 bnad_netif_rx_schedule_poll(bnad, ccb);
595 /* Interrupt handlers */
597 /* Mbox Interrupt Handlers */
599 bnad_msix_mbox_handler(int irq, void *data)
603 struct net_device *netdev = data;
606 bnad = netdev_priv(netdev);
608 /* BNA_ISR_GET(bnad); Inc Ref count */
609 spin_lock_irqsave(&bnad->bna_lock, flags);
611 bna_intr_status_get(&bnad->bna, intr_status);
613 if (BNA_IS_MBOX_ERR_INTR(intr_status))
614 bna_mbox_handler(&bnad->bna, intr_status);
616 spin_unlock_irqrestore(&bnad->bna_lock, flags);
618 /* BNAD_ISR_PUT(bnad); Dec Ref count */
623 bnad_isr(int irq, void *data)
628 struct net_device *netdev = data;
629 struct bnad *bnad = netdev_priv(netdev);
630 struct bnad_rx_info *rx_info;
631 struct bnad_rx_ctrl *rx_ctrl;
633 spin_lock_irqsave(&bnad->bna_lock, flags);
635 bna_intr_status_get(&bnad->bna, intr_status);
637 spin_unlock_irqrestore(&bnad->bna_lock, flags);
641 if (BNA_IS_MBOX_ERR_INTR(intr_status)) {
642 bna_mbox_handler(&bnad->bna, intr_status);
643 if (!BNA_IS_INTX_DATA_INTR(intr_status)) {
644 spin_unlock_irqrestore(&bnad->bna_lock, flags);
648 spin_unlock_irqrestore(&bnad->bna_lock, flags);
650 /* Process data interrupts */
651 for (i = 0; i < bnad->num_rx; i++) {
652 rx_info = &bnad->rx_info[i];
655 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
656 rx_ctrl = &rx_info->rx_ctrl[j];
658 bnad_netif_rx_schedule_poll(bnad,
667 * Called in interrupt / callback context
668 * with bna_lock held, so cfg_flags access is OK
671 bnad_enable_mbox_irq(struct bnad *bnad)
673 int irq = BNAD_GET_MBOX_IRQ(bnad);
675 if (!(bnad->cfg_flags & BNAD_CF_MSIX))
678 if (test_and_clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))
680 BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
684 * Called with bnad->bna_lock held because of
685 * bnad->cfg_flags access.
688 bnad_disable_mbox_irq(struct bnad *bnad)
690 int irq = BNAD_GET_MBOX_IRQ(bnad);
692 if (!(bnad->cfg_flags & BNAD_CF_MSIX))
695 if (!test_and_set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))
696 disable_irq_nosync(irq);
697 BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
700 /* Control Path Handlers */
704 bnad_cb_device_enable_mbox_intr(struct bnad *bnad)
706 bnad_enable_mbox_irq(bnad);
710 bnad_cb_device_disable_mbox_intr(struct bnad *bnad)
712 bnad_disable_mbox_irq(bnad);
716 bnad_cb_device_enabled(struct bnad *bnad, enum bna_cb_status status)
718 complete(&bnad->bnad_completions.ioc_comp);
719 bnad->bnad_completions.ioc_comp_status = status;
723 bnad_cb_device_disabled(struct bnad *bnad, enum bna_cb_status status)
725 complete(&bnad->bnad_completions.ioc_comp);
726 bnad->bnad_completions.ioc_comp_status = status;
730 bnad_cb_port_disabled(void *arg, enum bna_cb_status status)
732 struct bnad *bnad = (struct bnad *)arg;
734 complete(&bnad->bnad_completions.port_comp);
736 netif_carrier_off(bnad->netdev);
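/* Link state callback: tracks CEE state and toggles carrier / Tx queue */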
740 bnad_cb_port_link_status(struct bnad *bnad,
741 enum bna_link_status link_status)
745 link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);
747 if (link_status == BNA_CEE_UP) {
748 set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
749 BNAD_UPDATE_CTR(bnad, cee_up);
751 clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
754 if (!netif_carrier_ok(bnad->netdev)) {
755 pr_warn("bna: %s link up\n",
757 netif_carrier_on(bnad->netdev);
758 BNAD_UPDATE_CTR(bnad, link_toggle);
759 if (test_bit(BNAD_RF_TX_STARTED, &bnad->run_flags)) {
760 /* Force an immediate Transmit Schedule */
761 pr_info("bna: %s TX_STARTED\n",
763 netif_wake_queue(bnad->netdev);
764 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
766 netif_stop_queue(bnad->netdev);
767 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
771 if (netif_carrier_ok(bnad->netdev)) {
772 pr_warn("bna: %s link down\n",
774 netif_carrier_off(bnad->netdev);
775 BNAD_UPDATE_CTR(bnad, link_toggle);
781 bnad_cb_tx_disabled(void *arg, struct bna_tx *tx,
782 enum bna_cb_status status)
784 struct bnad *bnad = (struct bnad *)arg;
786 complete(&bnad->bnad_completions.tx_comp);
790 bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
792 struct bnad_tx_info *tx_info =
793 (struct bnad_tx_info *)tcb->txq->tx->priv;
794 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
796 tx_info->tcb[tcb->id] = tcb;
797 unmap_q->producer_index = 0;
798 unmap_q->consumer_index = 0;
799 unmap_q->q_depth = BNAD_TX_UNMAPQ_DEPTH;
803 bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
805 struct bnad_tx_info *tx_info =
806 (struct bnad_tx_info *)tcb->txq->tx->priv;
808 tx_info->tcb[tcb->id] = NULL;
812 bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb)
814 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
816 unmap_q->producer_index = 0;
817 unmap_q->consumer_index = 0;
818 unmap_q->q_depth = BNAD_RX_UNMAPQ_DEPTH;
822 bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
824 struct bnad_rx_info *rx_info =
825 (struct bnad_rx_info *)ccb->cq->rx->priv;
827 rx_info->rx_ctrl[ccb->id].ccb = ccb;
828 ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
832 bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
834 struct bnad_rx_info *rx_info =
835 (struct bnad_rx_info *)ccb->cq->rx->priv;
837 rx_info->rx_ctrl[ccb->id].ccb = NULL;
841 bnad_cb_tx_stall(struct bnad *bnad, struct bna_tcb *tcb)
843 struct bnad_tx_info *tx_info =
844 (struct bnad_tx_info *)tcb->txq->tx->priv;
846 if (tx_info != &bnad->tx_info[0])
849 clear_bit(BNAD_RF_TX_STARTED, &bnad->run_flags);
850 netif_stop_queue(bnad->netdev);
851 pr_info("bna: %s TX_STOPPED\n", bnad->netdev->name);
855 bnad_cb_tx_resume(struct bnad *bnad, struct bna_tcb *tcb)
857 if (test_and_set_bit(BNAD_RF_TX_STARTED, &bnad->run_flags))
860 if (netif_carrier_ok(bnad->netdev)) {
861 pr_info("bna: %s TX_STARTED\n", bnad->netdev->name);
862 netif_wake_queue(bnad->netdev);
863 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
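/* Frees any Tx buffers still mapped once the Tx datapath has been stopped */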
868 bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
870 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
872 if (!tcb || (!tcb->unmap_q))
875 if (!unmap_q->unmap_array)
878 if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
881 bnad_free_all_txbufs(bnad, tcb);
883 unmap_q->producer_index = 0;
884 unmap_q->consumer_index = 0;
886 smp_mb__before_clear_bit();
887 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
891 bnad_cb_rx_cleanup(struct bnad *bnad,
894 bnad_cq_cmpl_init(bnad, ccb);
896 bnad_free_rxbufs(bnad, ccb->rcb[0]);
897 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
900 bnad_free_rxbufs(bnad, ccb->rcb[1]);
901 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
906 bnad_cb_rx_post(struct bnad *bnad, struct bna_rcb *rcb)
908 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
910 set_bit(BNAD_RXQ_STARTED, &rcb->flags);
912 /* Now allocate & post buffers for this RCB */
913 /* !!Allocation in callback context */
914 if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
915 if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
916 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
917 bnad_alloc_n_post_rxbufs(bnad, rcb);
918 smp_mb__before_clear_bit();
919 clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
924 bnad_cb_rx_disabled(void *arg, struct bna_rx *rx,
925 enum bna_cb_status status)
927 struct bnad *bnad = (struct bnad *)arg;
929 complete(&bnad->bnad_completions.rx_comp);
933 bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx,
934 enum bna_cb_status status)
936 bnad->bnad_completions.mcast_comp_status = status;
937 complete(&bnad->bnad_completions.mcast_comp);
941 bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
942 struct bna_stats *stats)
944 if (status == BNA_CB_SUCCESS)
945 BNAD_UPDATE_CTR(bnad, hw_stats_updates);
947 if (!netif_running(bnad->netdev) ||
948 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
951 mod_timer(&bnad->stats_timer,
952 jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
956 bnad_cb_stats_clr(struct bnad *bnad)
960 /* Resource allocation, free functions */
963 bnad_mem_free(struct bnad *bnad,
964 struct bna_mem_info *mem_info)
969 if (mem_info->mdl == NULL)
972 for (i = 0; i < mem_info->num; i++) {
973 if (mem_info->mdl[i].kva != NULL) {
974 if (mem_info->mem_type == BNA_MEM_T_DMA) {
975 BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
977 pci_free_consistent(bnad->pcidev,
978 mem_info->mdl[i].len,
979 mem_info->mdl[i].kva, dma_pa);
981 kfree(mem_info->mdl[i].kva);
984 kfree(mem_info->mdl);
985 mem_info->mdl = NULL;
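/*
 * Allocates mem_info->num blocks of mem_info->len bytes each,
 * DMA-consistent or plain kernel memory depending on mem_type.
 */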
989 bnad_mem_alloc(struct bnad *bnad,
990 struct bna_mem_info *mem_info)
995 if ((mem_info->num == 0) || (mem_info->len == 0)) {
996 mem_info->mdl = NULL;
1000 mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
1002 if (mem_info->mdl == NULL)
1005 if (mem_info->mem_type == BNA_MEM_T_DMA) {
1006 for (i = 0; i < mem_info->num; i++) {
1007 mem_info->mdl[i].len = mem_info->len;
1008 mem_info->mdl[i].kva =
1009 pci_alloc_consistent(bnad->pcidev,
1010 mem_info->len, &dma_pa);
1012 if (mem_info->mdl[i].kva == NULL)
1015 BNA_SET_DMA_ADDR(dma_pa,
1016 &(mem_info->mdl[i].dma));
1019 for (i = 0; i < mem_info->num; i++) {
1020 mem_info->mdl[i].len = mem_info->len;
1021 mem_info->mdl[i].kva = kzalloc(mem_info->len,
1023 if (mem_info->mdl[i].kva == NULL)
1031 bnad_mem_free(bnad, mem_info);
1035 /* Free IRQ for Mailbox */
1037 bnad_mbox_irq_free(struct bnad *bnad,
1038 struct bna_intr_info *intr_info)
1041 unsigned long flags;
1043 if (intr_info->idl == NULL)
1046 spin_lock_irqsave(&bnad->bna_lock, flags);
1048 bnad_disable_mbox_irq(bnad);
1050 irq = BNAD_GET_MBOX_IRQ(bnad);
1051 free_irq(irq, bnad->netdev);
1053 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1055 kfree(intr_info->idl);
1059 * Allocates the IRQ for the mailbox, but keeps it disabled.
1060 * It will be enabled once we get the mbox enable callback.
1064 bnad_mbox_irq_alloc(struct bnad *bnad,
1065 struct bna_intr_info *intr_info)
1068 unsigned long flags;
1070 irq_handler_t irq_handler;
1072 /* Mbox should use only 1 vector */
1074 intr_info->idl = kzalloc(sizeof(*(intr_info->idl)), GFP_KERNEL);
1075 if (!intr_info->idl)
1078 spin_lock_irqsave(&bnad->bna_lock, flags);
1079 if (bnad->cfg_flags & BNAD_CF_MSIX) {
1080 irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
1081 irq = bnad->msix_table[bnad->msix_num - 1].vector;
1083 intr_info->intr_type = BNA_INTR_T_MSIX;
1084 intr_info->idl[0].vector = bnad->msix_num - 1;
1086 irq_handler = (irq_handler_t)bnad_isr;
1087 irq = bnad->pcidev->irq;
1088 flags = IRQF_SHARED;
1089 intr_info->intr_type = BNA_INTR_T_INTX;
1090 /* intr_info->idl.vector = 0 ? */
1092 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1094 sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
1096 err = request_irq(irq, irq_handler, flags,
1097 bnad->mbox_irq_name, bnad->netdev);
1099 kfree(intr_info->idl);
1100 intr_info->idl = NULL;
1104 spin_lock_irqsave(&bnad->bna_lock, flags);
1105 bnad_disable_mbox_irq(bnad);
1106 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1111 bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
1113 kfree(intr_info->idl);
1114 intr_info->idl = NULL;
1117 /* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
1119 bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
1120 uint txrx_id, struct bna_intr_info *intr_info)
1122 int i, vector_start = 0;
1124 unsigned long flags;
1126 spin_lock_irqsave(&bnad->bna_lock, flags);
1127 cfg_flags = bnad->cfg_flags;
1128 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1130 if (cfg_flags & BNAD_CF_MSIX) {
1131 intr_info->intr_type = BNA_INTR_T_MSIX;
1132 intr_info->idl = kcalloc(intr_info->num,
1133 sizeof(struct bna_intr_descr),
1135 if (!intr_info->idl)
1140 vector_start = txrx_id;
1144 vector_start = bnad->num_tx * bnad->num_txq_per_tx +
1152 for (i = 0; i < intr_info->num; i++)
1153 intr_info->idl[i].vector = vector_start + i;
1155 intr_info->intr_type = BNA_INTR_T_INTX;
1157 intr_info->idl = kcalloc(intr_info->num,
1158 sizeof(struct bna_intr_descr),
1160 if (!intr_info->idl)
1165 intr_info->idl[0].vector = 0x1; /* Bit mask : Tx IB */
1169 intr_info->idl[0].vector = 0x2; /* Bit mask : Rx IB */
1177 * NOTE: Should be called for MSIX only
1178 * Unregisters Tx MSIX vector(s) from the kernel
1181 bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
1187 for (i = 0; i < num_txqs; i++) {
1188 if (tx_info->tcb[i] == NULL)
1191 vector_num = tx_info->tcb[i]->intr_vector;
1192 free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
1197 * NOTE: Should be called for MSIX only
1198 * Registers Tx MSIX vector(s), ISR(s) and cookie with the kernel
1201 bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
1202 uint tx_id, int num_txqs)
1208 for (i = 0; i < num_txqs; i++) {
1209 vector_num = tx_info->tcb[i]->intr_vector;
1210 sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
1211 tx_id + tx_info->tcb[i]->id);
1212 err = request_irq(bnad->msix_table[vector_num].vector,
1213 (irq_handler_t)bnad_msix_tx, 0,
1214 tx_info->tcb[i]->name,
1224 bnad_tx_msix_unregister(bnad, tx_info, (i - 1));
1229 * NOTE: Should be called for MSIX only
1230 * Unregisters Rx MSIX vector(s) from the kernel
1233 bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
1239 for (i = 0; i < num_rxps; i++) {
1240 if (rx_info->rx_ctrl[i].ccb == NULL)
1243 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1244 free_irq(bnad->msix_table[vector_num].vector,
1245 rx_info->rx_ctrl[i].ccb);
1250 * NOTE: Should be called for MSIX only
1251 * Registers Rx MSIX vector(s), ISR(s) and cookie with the kernel
1254 bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
1255 uint rx_id, int num_rxps)
1261 for (i = 0; i < num_rxps; i++) {
1262 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1263 sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
1265 rx_id + rx_info->rx_ctrl[i].ccb->id);
1266 err = request_irq(bnad->msix_table[vector_num].vector,
1267 (irq_handler_t)bnad_msix_rx, 0,
1268 rx_info->rx_ctrl[i].ccb->name,
1269 rx_info->rx_ctrl[i].ccb);
1278 bnad_rx_msix_unregister(bnad, rx_info, (i - 1));
1282 /* Free Tx object Resources */
1284 bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1288 for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1289 if (res_info[i].res_type == BNA_RES_T_MEM)
1290 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1291 else if (res_info[i].res_type == BNA_RES_T_INTR)
1292 bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1296 /* Allocates memory and interrupt resources for Tx object */
1298 bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1303 for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1304 if (res_info[i].res_type == BNA_RES_T_MEM)
1305 err = bnad_mem_alloc(bnad,
1306 &res_info[i].res_u.mem_info);
1307 else if (res_info[i].res_type == BNA_RES_T_INTR)
1308 err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
1309 &res_info[i].res_u.intr_info);
1316 bnad_tx_res_free(bnad, res_info);
1320 /* Free Rx object Resources */
1322 bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1326 for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1327 if (res_info[i].res_type == BNA_RES_T_MEM)
1328 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1329 else if (res_info[i].res_type == BNA_RES_T_INTR)
1330 bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1334 /* Allocates memory and interrupt resources for Rx object */
1336 bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1341 /* All memory needs to be allocated before setup_ccbs */
1342 for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1343 if (res_info[i].res_type == BNA_RES_T_MEM)
1344 err = bnad_mem_alloc(bnad,
1345 &res_info[i].res_u.mem_info);
1346 else if (res_info[i].res_type == BNA_RES_T_INTR)
1347 err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
1348 &res_info[i].res_u.intr_info);
1355 bnad_rx_res_free(bnad, res_info);
1359 /* Timer callbacks */
1362 bnad_ioc_timeout(unsigned long data)
1364 struct bnad *bnad = (struct bnad *)data;
1365 unsigned long flags;
1367 spin_lock_irqsave(&bnad->bna_lock, flags);
1368 bfa_nw_ioc_timeout((void *) &bnad->bna.device.ioc);
1369 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1373 bnad_ioc_hb_check(unsigned long data)
1375 struct bnad *bnad = (struct bnad *)data;
1376 unsigned long flags;
1378 spin_lock_irqsave(&bnad->bna_lock, flags);
1379 bfa_nw_ioc_hb_check((void *) &bnad->bna.device.ioc);
1380 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1384 bnad_ioc_sem_timeout(unsigned long data)
1386 struct bnad *bnad = (struct bnad *)data;
1387 unsigned long flags;
1389 spin_lock_irqsave(&bnad->bna_lock, flags);
1390 bfa_nw_ioc_sem_timeout((void *) &bnad->bna.device.ioc);
1391 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1395 * All timer routines use bnad->bna_lock to protect against
1396 * the following race, which may occur in case of no locking:
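 *	Example (no locking):	CPU m			CPU n
 *	    t0   test_bit(TIMER_RUNNING)
 *	    t1						clear_bit(TIMER_RUNNING)
 *	    t2						del_timer_sync()
 *	    t3   mod_timer()	<- timer re-armed after it was stopped
 */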
1404 /* b) Dynamic Interrupt Moderation Timer */
1406 bnad_dim_timeout(unsigned long data)
1408 struct bnad *bnad = (struct bnad *)data;
1409 struct bnad_rx_info *rx_info;
1410 struct bnad_rx_ctrl *rx_ctrl;
1412 unsigned long flags;
1414 if (!netif_carrier_ok(bnad->netdev))
1417 spin_lock_irqsave(&bnad->bna_lock, flags);
1418 for (i = 0; i < bnad->num_rx; i++) {
1419 rx_info = &bnad->rx_info[i];
1422 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
1423 rx_ctrl = &rx_info->rx_ctrl[j];
1426 bna_rx_dim_update(rx_ctrl->ccb);
1430 /* Check for BNAD_CF_DIM_ENABLED, does not eliminate a race */
1431 if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
1432 mod_timer(&bnad->dim_timer,
1433 jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1434 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1437 /* c) Statistics Timer */
1439 bnad_stats_timeout(unsigned long data)
1441 struct bnad *bnad = (struct bnad *)data;
1442 unsigned long flags;
1444 if (!netif_running(bnad->netdev) ||
1445 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1448 spin_lock_irqsave(&bnad->bna_lock, flags);
1449 bna_stats_get(&bnad->bna);
1450 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1454 * Set up timer for DIM
1455 * Called with bnad->bna_lock held
1458 bnad_dim_timer_start(struct bnad *bnad)
1460 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1461 !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
1462 setup_timer(&bnad->dim_timer, bnad_dim_timeout,
1463 (unsigned long)bnad);
1464 set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1465 mod_timer(&bnad->dim_timer,
1466 jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1471 * Set up timer for statistics
1472 * Called with mutex_lock(&bnad->conf_mutex) held
1475 bnad_stats_timer_start(struct bnad *bnad)
1477 unsigned long flags;
1479 spin_lock_irqsave(&bnad->bna_lock, flags);
1480 if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
1481 setup_timer(&bnad->stats_timer, bnad_stats_timeout,
1482 (unsigned long)bnad);
1483 mod_timer(&bnad->stats_timer,
1484 jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1486 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1491 * Stops the stats timer
1492 * Called with mutex_lock(&bnad->conf_mutex) held
1495 bnad_stats_timer_stop(struct bnad *bnad)
1498 unsigned long flags;
1500 spin_lock_irqsave(&bnad->bna_lock, flags);
1501 if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1503 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1505 del_timer_sync(&bnad->stats_timer);
1511 bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
1513 int i = 1; /* Index 0 has broadcast address */
1514 struct netdev_hw_addr *mc_addr;
1516 netdev_for_each_mc_addr(mc_addr, netdev) {
1517 memcpy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0],
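/* NAPI poll handler used in MSI-X mode: Rx completions only */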
1524 bnad_napi_poll_rx(struct napi_struct *napi, int budget)
1526 struct bnad_rx_ctrl *rx_ctrl =
1527 container_of(napi, struct bnad_rx_ctrl, napi);
1528 struct bna_ccb *ccb;
1536 if (!netif_carrier_ok(bnad->netdev))
1539 rcvd = bnad_poll_cq(bnad, ccb, budget);
1544 napi_complete((napi));
1546 BNAD_UPDATE_CTR(bnad, netif_rx_complete);
1548 bnad_enable_rx_irq(bnad, ccb);
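/* NAPI poll handler used in INTx mode: handles both Tx and Rx completions */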
1553 bnad_napi_poll_txrx(struct napi_struct *napi, int budget)
1555 struct bnad_rx_ctrl *rx_ctrl =
1556 container_of(napi, struct bnad_rx_ctrl, napi);
1557 struct bna_ccb *ccb;
1566 if (!netif_carrier_ok(bnad->netdev))
1569 /* Handle Tx Completions, if any */
1570 for (i = 0; i < bnad->num_tx; i++) {
1571 for (j = 0; j < bnad->num_txq_per_tx; j++)
1572 bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
1575 /* Handle Rx Completions */
1576 rcvd = bnad_poll_cq(bnad, ccb, budget);
1580 napi_complete((napi));
1582 BNAD_UPDATE_CTR(bnad, netif_rx_complete);
1584 bnad_enable_txrx_irqs(bnad);
1589 bnad_napi_enable(struct bnad *bnad, u32 rx_id)
1591 int (*napi_poll) (struct napi_struct *, int);
1592 struct bnad_rx_ctrl *rx_ctrl;
1594 unsigned long flags;
1596 spin_lock_irqsave(&bnad->bna_lock, flags);
1597 if (bnad->cfg_flags & BNAD_CF_MSIX)
1598 napi_poll = bnad_napi_poll_rx;
1600 napi_poll = bnad_napi_poll_txrx;
1601 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1603 /* Initialize & enable NAPI */
1604 for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1605 rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
1606 netif_napi_add(bnad->netdev, &rx_ctrl->napi,
1608 napi_enable(&rx_ctrl->napi);
1613 bnad_napi_disable(struct bnad *bnad, u32 rx_id)
1617 /* First disable and then clean up */
1618 for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1619 napi_disable(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1620 netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1624 /* Should be called with conf_lock held */
1626 bnad_cleanup_tx(struct bnad *bnad, uint tx_id)
1628 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1629 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1630 unsigned long flags;
1635 init_completion(&bnad->bnad_completions.tx_comp);
1636 spin_lock_irqsave(&bnad->bna_lock, flags);
1637 bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
1638 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1639 wait_for_completion(&bnad->bnad_completions.tx_comp);
1641 if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
1642 bnad_tx_msix_unregister(bnad, tx_info,
1643 bnad->num_txq_per_tx);
1645 spin_lock_irqsave(&bnad->bna_lock, flags);
1646 bna_tx_destroy(tx_info->tx);
1647 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1652 tasklet_kill(&bnad->tx_free_tasklet);
1654 bnad_tx_res_free(bnad, res_info);
1657 /* Should be called with conf_lock held */
1659 bnad_setup_tx(struct bnad *bnad, uint tx_id)
1662 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1663 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1664 struct bna_intr_info *intr_info =
1665 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
1666 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
1667 struct bna_tx_event_cbfn tx_cbfn;
1669 unsigned long flags;
1671 /* Initialize the Tx object configuration */
1672 tx_config->num_txq = bnad->num_txq_per_tx;
1673 tx_config->txq_depth = bnad->txq_depth;
1674 tx_config->tx_type = BNA_TX_T_REGULAR;
1676 /* Initialize the tx event handlers */
1677 tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
1678 tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
1679 tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
1680 tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
1681 tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;
1683 /* Get BNA's resource requirement for one tx object */
1684 spin_lock_irqsave(&bnad->bna_lock, flags);
1685 bna_tx_res_req(bnad->num_txq_per_tx,
1686 bnad->txq_depth, res_info);
1687 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1689 /* Fill Unmap Q memory requirements */
1690 BNAD_FILL_UNMAPQ_MEM_REQ(
1691 &res_info[BNA_TX_RES_MEM_T_UNMAPQ],
1692 bnad->num_txq_per_tx,
1693 BNAD_TX_UNMAPQ_DEPTH);
1695 /* Allocate resources */
1696 err = bnad_tx_res_alloc(bnad, res_info, tx_id);
1700 /* Ask BNA to create one Tx object, supplying required resources */
1701 spin_lock_irqsave(&bnad->bna_lock, flags);
1702 tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
1704 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1709 /* Register ISR for the Tx object */
1710 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
1711 err = bnad_tx_msix_register(bnad, tx_info,
1712 tx_id, bnad->num_txq_per_tx);
1717 spin_lock_irqsave(&bnad->bna_lock, flags);
1719 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1724 bnad_tx_res_free(bnad, res_info);
1728 /* Setup the rx config for bna_rx_create */
1729 /* bnad decides the configuration */
1731 bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
1733 rx_config->rx_type = BNA_RX_T_REGULAR;
1734 rx_config->num_paths = bnad->num_rxp_per_rx;
1736 if (bnad->num_rxp_per_rx > 1) {
1737 rx_config->rss_status = BNA_STATUS_T_ENABLED;
1738 rx_config->rss_config.hash_type =
1743 rx_config->rss_config.hash_mask =
1744 bnad->num_rxp_per_rx - 1;
1745 get_random_bytes(rx_config->rss_config.toeplitz_hash_key,
1746 sizeof(rx_config->rss_config.toeplitz_hash_key));
1748 rx_config->rss_status = BNA_STATUS_T_DISABLED;
1749 memset(&rx_config->rss_config, 0,
1750 sizeof(rx_config->rss_config));
1752 rx_config->rxp_type = BNA_RXP_SLR;
1753 rx_config->q_depth = bnad->rxq_depth;
1755 rx_config->small_buff_size = BFI_SMALL_RXBUF_SIZE;
1757 rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
1760 /* Called with mutex_lock(&bnad->conf_mutex) held */
1762 bnad_cleanup_rx(struct bnad *bnad, uint rx_id)
1764 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1765 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
1766 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
1767 unsigned long flags;
1768 int dim_timer_del = 0;
1774 spin_lock_irqsave(&bnad->bna_lock, flags);
1775 dim_timer_del = bnad_dim_timer_running(bnad);
1777 clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1778 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1780 del_timer_sync(&bnad->dim_timer);
1783 bnad_napi_disable(bnad, rx_id);
1785 init_completion(&bnad->bnad_completions.rx_comp);
1786 spin_lock_irqsave(&bnad->bna_lock, flags);
1787 bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
1788 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1789 wait_for_completion(&bnad->bnad_completions.rx_comp);
1791 if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
1792 bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
1794 spin_lock_irqsave(&bnad->bna_lock, flags);
1795 bna_rx_destroy(rx_info->rx);
1796 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1800 bnad_rx_res_free(bnad, res_info);
1803 /* Called with mutex_lock(&bnad->conf_mutex) held */
1805 bnad_setup_rx(struct bnad *bnad, uint rx_id)
1808 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1809 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
1810 struct bna_intr_info *intr_info =
1811 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
1812 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
1813 struct bna_rx_event_cbfn rx_cbfn;
1815 unsigned long flags;
1817 /* Initialize the Rx object configuration */
1818 bnad_init_rx_config(bnad, rx_config);
1820 /* Initialize the Rx event handlers */
1821 rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
1822 rx_cbfn.rcb_destroy_cbfn = NULL;
1823 rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
1824 rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
1825 rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
1826 rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;
1828 /* Get BNA's resource requirement for one Rx object */
1829 spin_lock_irqsave(&bnad->bna_lock, flags);
1830 bna_rx_res_req(rx_config, res_info);
1831 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1833 /* Fill Unmap Q memory requirements */
1834 BNAD_FILL_UNMAPQ_MEM_REQ(
1835 &res_info[BNA_RX_RES_MEM_T_UNMAPQ],
1836 rx_config->num_paths +
1837 ((rx_config->rxp_type == BNA_RXP_SINGLE) ? 0 :
1838 rx_config->num_paths), BNAD_RX_UNMAPQ_DEPTH);
1840 /* Allocate resource */
1841 err = bnad_rx_res_alloc(bnad, res_info, rx_id);
1845 /* Ask BNA to create one Rx object, supplying required resources */
1846 spin_lock_irqsave(&bnad->bna_lock, flags);
1847 rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
1849 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1854 /* Register ISR for the Rx object */
1855 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
1856 err = bnad_rx_msix_register(bnad, rx_info, rx_id,
1857 rx_config->num_paths);
1863 bnad_napi_enable(bnad, rx_id);
1865 spin_lock_irqsave(&bnad->bna_lock, flags);
1867 /* Set up Dynamic Interrupt Moderation Vector */
1868 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
1869 bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
1871 /* Enable VLAN filtering only on the default Rx */
1872 bna_rx_vlanfilter_enable(rx);
1874 /* Start the DIM timer */
1875 bnad_dim_timer_start(bnad);
1879 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1884 bnad_cleanup_rx(bnad, rx_id);
1888 /* Called with conf_lock & bnad->bna_lock held */
1890 bnad_tx_coalescing_timeo_set(struct bnad *bnad)
1892 struct bnad_tx_info *tx_info;
1894 tx_info = &bnad->tx_info[0];
1898 bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
1901 /* Called with conf_lock & bnad->bna_lock held */
1903 bnad_rx_coalescing_timeo_set(struct bnad *bnad)
1905 struct bnad_rx_info *rx_info;
1908 for (i = 0; i < bnad->num_rx; i++) {
1909 rx_info = &bnad->rx_info[i];
1912 bna_rx_coalescing_timeo_set(rx_info->rx,
1913 bnad->rx_coalescing_timeo);
1918 * Called with bnad->bna_lock held
1921 bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
1925 if (!is_valid_ether_addr(mac_addr))
1926 return -EADDRNOTAVAIL;
1928 /* If datapath is down, pretend everything went through */
1929 if (!bnad->rx_info[0].rx)
1932 ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL);
1933 if (ret != BNA_CB_SUCCESS)
1934 return -EADDRNOTAVAIL;
1939 /* Should be called with conf_lock held */
1941 bnad_enable_default_bcast(struct bnad *bnad)
1943 struct bnad_rx_info *rx_info = &bnad->rx_info[0];
1945 unsigned long flags;
1947 init_completion(&bnad->bnad_completions.mcast_comp);
1949 spin_lock_irqsave(&bnad->bna_lock, flags);
1950 ret = bna_rx_mcast_add(rx_info->rx, (u8 *)bnad_bcast_addr,
1951 bnad_cb_rx_mcast_add);
1952 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1954 if (ret == BNA_CB_SUCCESS)
1955 wait_for_completion(&bnad->bnad_completions.mcast_comp);
1959 if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
1965 /* Statistics utilities */
1967 bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
1971 for (i = 0; i < bnad->num_rx; i++) {
1972 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
1973 if (bnad->rx_info[i].rx_ctrl[j].ccb) {
1974 stats->rx_packets += bnad->rx_info[i].
1975 rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
1976 stats->rx_bytes += bnad->rx_info[i].
1977 rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
1978 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
1979 bnad->rx_info[i].rx_ctrl[j].ccb->
1981 stats->rx_packets +=
1982 bnad->rx_info[i].rx_ctrl[j].
1983 ccb->rcb[1]->rxq->rx_packets;
1985 bnad->rx_info[i].rx_ctrl[j].
1986 ccb->rcb[1]->rxq->rx_bytes;
1991 for (i = 0; i < bnad->num_tx; i++) {
1992 for (j = 0; j < bnad->num_txq_per_tx; j++) {
1993 if (bnad->tx_info[i].tcb[j]) {
1994 stats->tx_packets +=
1995 bnad->tx_info[i].tcb[j]->txq->tx_packets;
1997 bnad->tx_info[i].tcb[j]->txq->tx_bytes;
2004 * Must be called with the bna_lock held.
2007 bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2009 struct bfi_ll_stats_mac *mac_stats;
2013 mac_stats = &bnad->stats.bna_stats->hw_stats->mac_stats;
2015 mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
2016 mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
2017 mac_stats->rx_undersize;
2018 stats->tx_errors = mac_stats->tx_fcs_error +
2019 mac_stats->tx_undersize;
2020 stats->rx_dropped = mac_stats->rx_drop;
2021 stats->tx_dropped = mac_stats->tx_drop;
2022 stats->multicast = mac_stats->rx_multicast;
2023 stats->collisions = mac_stats->tx_total_collision;
2025 stats->rx_length_errors = mac_stats->rx_frame_length_error;
2027 /* receive ring buffer overflow ?? */
2029 stats->rx_crc_errors = mac_stats->rx_fcs_error;
2030 stats->rx_frame_errors = mac_stats->rx_alignment_error;
2031 /* receiver FIFO overrun */
2032 bmap = (u64)bnad->stats.bna_stats->rxf_bmap[0] |
2033 ((u64)bnad->stats.bna_stats->rxf_bmap[1] << 32);
2034 for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
2036 stats->rx_fifo_errors +=
2037 bnad->stats.bna_stats->
2038 hw_stats->rxf_stats[i].frame_drops;
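/* Waits for any in-flight mailbox interrupt handler to complete */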
2046 bnad_mbox_irq_sync(struct bnad *bnad)
2049 unsigned long flags;
2051 spin_lock_irqsave(&bnad->bna_lock, flags);
2052 if (bnad->cfg_flags & BNAD_CF_MSIX)
2053 irq = bnad->msix_table[bnad->msix_num - 1].vector;
2055 irq = bnad->pcidev->irq;
2056 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2058 synchronize_irq(irq);
2061 /* Utility used by bnad_start_xmit, for doing TSO */
2063 bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
2067 /* SKB_GSO_TCPV4 and SKB_GSO_TCPV6 are defined since 2.6.18. */
2068 BUG_ON(!(skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 ||
2069 skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6));
2070 if (skb_header_cloned(skb)) {
2071 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2073 BNAD_UPDATE_CTR(bnad, tso_err);
2079 * For TSO, the TCP checksum field is seeded with pseudo-header sum
2080 * excluding the length field.
2082 if (skb->protocol == htons(ETH_P_IP)) {
2083 struct iphdr *iph = ip_hdr(skb);
2085 /* Do we really need these? */
2089 tcp_hdr(skb)->check =
2090 ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
2092 BNAD_UPDATE_CTR(bnad, tso4);
2094 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
2096 BUG_ON(!(skb->protocol == htons(ETH_P_IPV6)));
2097 ipv6h->payload_len = 0;
2098 tcp_hdr(skb)->check =
2099 ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
2101 BNAD_UPDATE_CTR(bnad, tso6);
2108 * Initialize Q numbers depending on Rx Paths
2109 * Called with bnad->bna_lock held, because of cfg_flags
2113 bnad_q_num_init(struct bnad *bnad)
2117 rxps = min((uint)num_online_cpus(),
2118 (uint)(BNAD_MAX_RXS * BNAD_MAX_RXPS_PER_RX));
2120 if (!(bnad->cfg_flags & BNAD_CF_MSIX))
2121 rxps = 1; /* INTx */
2125 bnad->num_rxp_per_rx = rxps;
2126 bnad->num_txq_per_tx = BNAD_TXQ_NUM;
2130 * Adjusts the Q numbers, given the number of MSI-X vectors.
2131 * Gives preference to RSS as opposed to Tx priority queues;
2132 * in that case, just use 1 Tx Q.
2133 * Called with bnad->bna_lock held because of cfg_flags access.
2136 bnad_q_num_adjust(struct bnad *bnad, int msix_vectors)
2138 bnad->num_txq_per_tx = 1;
2139 if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) +
2140 bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
2141 (bnad->cfg_flags & BNAD_CF_MSIX)) {
2142 bnad->num_rxp_per_rx = msix_vectors -
2143 (bnad->num_tx * bnad->num_txq_per_tx) -
2144 BNAD_MAILBOX_MSIX_VECTORS;
2146 bnad->num_rxp_per_rx = 1;
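/*
 * Copies the adapter's permanent MAC address into netdev->perm_addr and,
 * if no address has been set yet, into netdev->dev_addr as well.
 */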
2150 bnad_set_netdev_perm_addr(struct bnad *bnad)
2152 struct net_device *netdev = bnad->netdev;
2154 memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
2155 if (is_zero_ether_addr(netdev->dev_addr))
2156 memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
2159 /* Enable / disable device */
2161 bnad_device_disable(struct bnad *bnad)
2163 unsigned long flags;
2165 init_completion(&bnad->bnad_completions.ioc_comp);
2167 spin_lock_irqsave(&bnad->bna_lock, flags);
2168 bna_device_disable(&bnad->bna.device, BNA_HARD_CLEANUP);
2169 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2171 wait_for_completion(&bnad->bnad_completions.ioc_comp);
2176 bnad_device_enable(struct bnad *bnad)
2179 unsigned long flags;
2181 init_completion(&bnad->bnad_completions.ioc_comp);
2183 spin_lock_irqsave(&bnad->bna_lock, flags);
2184 bna_device_enable(&bnad->bna.device);
2185 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2187 wait_for_completion(&bnad->bnad_completions.ioc_comp);
2189 if (bnad->bnad_completions.ioc_comp_status)
2190 err = bnad->bnad_completions.ioc_comp_status;
2195 /* Free BNA resources */
2197 bnad_res_free(struct bnad *bnad)
2200 struct bna_res_info *res_info = &bnad->res_info[0];
2202 for (i = 0; i < BNA_RES_T_MAX; i++) {
2203 if (res_info[i].res_type == BNA_RES_T_MEM)
2204 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
2206 bnad_mbox_irq_free(bnad, &res_info[i].res_u.intr_info);
2210 /* Allocates memory and interrupt resources for BNA */
2212 bnad_res_alloc(struct bnad *bnad)
2215 struct bna_res_info *res_info = &bnad->res_info[0];
2217 for (i = 0; i < BNA_RES_T_MAX; i++) {
2218 if (res_info[i].res_type == BNA_RES_T_MEM)
2219 err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
2221 err = bnad_mbox_irq_alloc(bnad,
2222 &res_info[i].res_u.intr_info);
2229 bnad_res_free(bnad);
2233 /* Interrupt enable / disable */
2235 bnad_enable_msix(struct bnad *bnad)
2239 unsigned long flags;
2241 spin_lock_irqsave(&bnad->bna_lock, flags);
2242 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2243 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2246 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2248 if (bnad->msix_table)
2251 tot_msix_num = bnad->msix_num + bnad->msix_diag_num;
2254 kcalloc(tot_msix_num, sizeof(struct msix_entry), GFP_KERNEL);
2256 if (!bnad->msix_table)
2259 for (i = 0; i < tot_msix_num; i++)
2260 bnad->msix_table[i].entry = i;
2262 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, tot_msix_num);
2264 /* Not enough MSI-X vectors. */
2266 spin_lock_irqsave(&bnad->bna_lock, flags);
2267 /* ret = #of vectors that we got */
2268 bnad_q_num_adjust(bnad, ret);
2269 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2271 bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx)
2273 * bnad->num_rxp_per_rx) +
2274 BNAD_MAILBOX_MSIX_VECTORS;
2275 tot_msix_num = bnad->msix_num + bnad->msix_diag_num;
2277 /* Try once more with adjusted numbers */
2278 /* If this fails, fall back to INTx */
2279 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
2290 kfree(bnad->msix_table);
2291 bnad->msix_table = NULL;
2293 bnad->msix_diag_num = 0;
2294 spin_lock_irqsave(&bnad->bna_lock, flags);
2295 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2296 bnad_q_num_init(bnad);
2297 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2301 bnad_disable_msix(struct bnad *bnad)
2304 unsigned long flags;
2306 spin_lock_irqsave(&bnad->bna_lock, flags);
2307 cfg_flags = bnad->cfg_flags;
2308 if (bnad->cfg_flags & BNAD_CF_MSIX)
2309 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2310 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2312 if (cfg_flags & BNAD_CF_MSIX) {
2313 pci_disable_msix(bnad->pcidev);
2314 kfree(bnad->msix_table);
2315 bnad->msix_table = NULL;
2319 /* Netdev entry points */
2321 bnad_open(struct net_device *netdev)
2324 struct bnad *bnad = netdev_priv(netdev);
2325 struct bna_pause_config pause_config;
2327 unsigned long flags;
2329 mutex_lock(&bnad->conf_mutex);
2332 err = bnad_setup_tx(bnad, 0);
2337 err = bnad_setup_rx(bnad, 0);
2342 pause_config.tx_pause = 0;
2343 pause_config.rx_pause = 0;
2345 mtu = ETH_HLEN + bnad->netdev->mtu + ETH_FCS_LEN;
2347 spin_lock_irqsave(&bnad->bna_lock, flags);
2348 bna_port_mtu_set(&bnad->bna.port, mtu, NULL);
2349 bna_port_pause_config(&bnad->bna.port, &pause_config, NULL);
2350 bna_port_enable(&bnad->bna.port);
2351 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2353 /* Enable broadcast */
2354 bnad_enable_default_bcast(bnad);
2356 /* Set the UCAST address */
2357 spin_lock_irqsave(&bnad->bna_lock, flags);
2358 bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2359 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2361 /* Start the stats timer */
2362 bnad_stats_timer_start(bnad);
2364 mutex_unlock(&bnad->conf_mutex);
2369 bnad_cleanup_tx(bnad, 0);
2372 mutex_unlock(&bnad->conf_mutex);
2377 bnad_stop(struct net_device *netdev)
2379 struct bnad *bnad = netdev_priv(netdev);
2380 unsigned long flags;
2382 mutex_lock(&bnad->conf_mutex);
2384 /* Stop the stats timer */
2385 bnad_stats_timer_stop(bnad);
2387 init_completion(&bnad->bnad_completions.port_comp);
2389 spin_lock_irqsave(&bnad->bna_lock, flags);
2390 bna_port_disable(&bnad->bna.port, BNA_HARD_CLEANUP,
2391 bnad_cb_port_disabled);
2392 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2394 wait_for_completion(&bnad->bnad_completions.port_comp);
2396 bnad_cleanup_tx(bnad, 0);
2397 bnad_cleanup_rx(bnad, 0);
2399 /* Synchronize mailbox IRQ */
2400 bnad_mbox_irq_sync(bnad);
2402 mutex_unlock(&bnad->conf_mutex);
2409 * bnad_start_xmit : Netdev entry point for Transmit
2410 * Called under lock held by net_device
2413 bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2415 struct bnad *bnad = netdev_priv(netdev);
2417 u16 txq_prod, vlan_tag = 0;
2418 u32 unmap_prod, wis, wis_used, wi_range;
2419 u32 vectors, vect_id, i, acked;
2423 struct bnad_tx_info *tx_info;
2424 struct bna_tcb *tcb;
2425 struct bnad_unmap_q *unmap_q;
2426 dma_addr_t dma_addr;
2427 struct bna_txq_entry *txqent;
2428 bna_txq_wi_ctrl_flag_t flags;
2431 (skb->len <= ETH_HLEN || skb->len > BFI_TX_MAX_DATA_PER_PKT)) {
2433 return NETDEV_TX_OK;
2437 * Takes care of the Tx that is scheduled between clearing the flag
2438 * and the netif_stop_queue() call.
2440 if (unlikely(!test_bit(BNAD_RF_TX_STARTED, &bnad->run_flags))) {
2442 return NETDEV_TX_OK;
2447 tx_info = &bnad->tx_info[tx_id];
2448 tcb = tx_info->tcb[tx_id];
2449 unmap_q = tcb->unmap_q;
2451 vectors = 1 + skb_shinfo(skb)->nr_frags;
2452 if (vectors > BFI_TX_MAX_VECTORS_PER_PKT) {
2454 return NETDEV_TX_OK;
2456 wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */
2459 (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2460 vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
2461 if ((u16) (*tcb->hw_consumer_index) !=
2462 tcb->consumer_index &&
2463 !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
2464 acked = bnad_free_txbufs(bnad, tcb);
2465 bna_ib_ack(tcb->i_dbell, acked);
2466 smp_mb__before_clear_bit();
2467 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
2469 netif_stop_queue(netdev);
2470 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2475 * Check again to deal with the race between
2476 * netif_stop_queue() here and netif_wake_queue() in the
2477 * interrupt handler, which does not run under the netif tx lock.
2480 (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2481 vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
2482 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2483 return NETDEV_TX_BUSY;
2485 netif_wake_queue(netdev);
2486 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
2490 unmap_prod = unmap_q->producer_index;
2495 txq_prod = tcb->producer_index;
2496 BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt, txqent, wi_range);
2497 BUG_ON(!(wi_range <= tcb->q_depth));
2498 txqent->hdr.wi.reserved = 0;
2499 txqent->hdr.wi.num_vectors = vectors;
2500 txqent->hdr.wi.opcode =
2501 htons((skb_is_gso(skb) ? BNA_TXQ_WI_SEND_LSO :
2504 if (bnad->vlan_grp && vlan_tx_tag_present(skb)) {
2505 vlan_tag = (u16) vlan_tx_tag_get(skb);
2506 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2508 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
2510 (tcb->priority & 0x7) << 13 | (vlan_tag & 0x1fff);
2511 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2514 txqent->hdr.wi.vlan_tag = htons(vlan_tag);
2516 if (skb_is_gso(skb)) {
2517 err = bnad_tso_prepare(bnad, skb);
2520 return NETDEV_TX_OK;
2522 txqent->hdr.wi.lso_mss = htons(skb_is_gso(skb));
2523 flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
2524 txqent->hdr.wi.l4_hdr_size_n_offset =
2525 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2526 (tcp_hdrlen(skb) >> 2,
2527 skb_transport_offset(skb)));
2528 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
2531 txqent->hdr.wi.lso_mss = 0;
2533 if (skb->protocol == htons(ETH_P_IP))
2534 proto = ip_hdr(skb)->protocol;
2535 else if (skb->protocol == htons(ETH_P_IPV6)) {
2536 /* nexthdr may not be TCP immediately; extension headers can precede it. */
2537 proto = ipv6_hdr(skb)->nexthdr;
2539 if (proto == IPPROTO_TCP) {
2540 flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
2541 txqent->hdr.wi.l4_hdr_size_n_offset =
2542 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2543 (0, skb_transport_offset(skb)));
2545 BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
2547 BUG_ON(!(skb_headlen(skb) >=
2548 skb_transport_offset(skb) + tcp_hdrlen(skb)));
2550 } else if (proto == IPPROTO_UDP) {
2551 flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
2552 txqent->hdr.wi.l4_hdr_size_n_offset =
2553 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2554 (0, skb_transport_offset(skb)));
2556 BNAD_UPDATE_CTR(bnad, udpcsum_offload);
2558 BUG_ON(!(skb_headlen(skb) >=
2559 skb_transport_offset(skb) +
2560 sizeof(struct udphdr)));
2561 } else {
2562 err = skb_checksum_help(skb);
2563 BNAD_UPDATE_CTR(bnad, csum_help);
2564 if (err) {
2565 dev_kfree_skb(skb);
2566 BNAD_UPDATE_CTR(bnad, csum_help_err);
2567 return NETDEV_TX_OK;
2568 }
2569 }
2570 } else {
2571 txqent->hdr.wi.lso_mss = 0;
2572 txqent->hdr.wi.l4_hdr_size_n_offset = 0;
2573 }
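/*
 * At this point 'flags' carries the checksum/LSO offload requests for this
 * frame; anything the hardware cannot offload has already been resolved in
 * software via skb_checksum_help().
 */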
2575 txqent->hdr.wi.flags = htons(flags);
2577 txqent->hdr.wi.frame_length = htonl(skb->len);
2579 unmap_q->unmap_array[unmap_prod].skb = skb;
2580 BUG_ON(!(skb_headlen(skb) <= BFI_TX_MAX_DATA_PER_VECTOR));
2581 txqent->vector[vect_id].length = htons(skb_headlen(skb));
2582 dma_addr = pci_map_single(bnad->pcidev, skb->data, skb_headlen(skb),
2583 PCI_DMA_TODEVICE);
2584 pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
2585 dma_addr);
2587 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
2588 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
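/* Map each page fragment and add one Tx vector (and one unmap slot) per frag. */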
2590 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2591 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
2592 u32 size = frag->size;
2594 if (++vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
2595 vect_id = 0;
2596 if (--wi_range)
2597 txqent++;
2598 else {
2599 BNA_QE_INDX_ADD(txq_prod, wis_used, tcb->q_depth);
2601 wis_used = 0;
2602 BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt, txqent, wi_range);
2604 BUG_ON(!(wi_range <= tcb->q_depth));
2605 }
2606 wis_used++;
2607 txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
2608 }
2610 BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR));
2611 txqent->vector[vect_id].length = htons(size);
2612 dma_addr = pci_map_page(bnad->pcidev, frag->page,
2614 frag->page_offset, size, PCI_DMA_TODEVICE);
2616 pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
2617 dma_addr);
2618 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
2619 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
2620 }
2622 unmap_q->producer_index = unmap_prod;
2623 BNA_QE_INDX_ADD(txq_prod, wis_used, tcb->q_depth);
2624 tcb->producer_index = txq_prod;
2626 smp_mb();
2627 bna_txq_prod_indx_doorbell(tcb);
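/*
 * The doorbell write tells hardware about the new producer index; the
 * smp_mb() above keeps the work-item updates visible before the ring.
 */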
2629 if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index)
2630 tasklet_schedule(&bnad->tx_free_tasklet);
2632 return NETDEV_TX_OK;
2633 }
2635 /*
2636 * Used spin_lock to synchronize reading of stats structures, which
2637 * is written by BNA under the same lock.
2638 */
2639 static struct rtnl_link_stats64 *
2640 bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
2641 {
2642 struct bnad *bnad = netdev_priv(netdev);
2643 unsigned long flags;
2645 spin_lock_irqsave(&bnad->bna_lock, flags);
2647 bnad_netdev_qstats_fill(bnad, stats);
2648 bnad_netdev_hwstats_fill(bnad, stats);
2650 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2652 return stats;
2653 }
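/*
 * Illustrative only: the stack calls this through dev_get_stats(), e.g.
 *
 *	struct rtnl_link_stats64 tmp;
 *	dev_get_stats(netdev, &tmp);
 *
 * so the function must fill and return the caller-supplied 'stats'.
 */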
2655 void
2656 bnad_set_rx_mode(struct net_device *netdev)
2657 {
2658 struct bnad *bnad = netdev_priv(netdev);
2659 u32 new_mask, valid_mask;
2660 unsigned long flags;
2662 spin_lock_irqsave(&bnad->bna_lock, flags);
2664 new_mask = valid_mask = 0;
2666 if (netdev->flags & IFF_PROMISC) {
2667 if (!(bnad->cfg_flags & BNAD_CF_PROMISC)) {
2668 new_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2669 valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2670 bnad->cfg_flags |= BNAD_CF_PROMISC;
2671 }
2672 } else {
2673 if (bnad->cfg_flags & BNAD_CF_PROMISC) {
2674 new_mask = ~BNAD_RXMODE_PROMISC_DEFAULT;
2675 valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2676 bnad->cfg_flags &= ~BNAD_CF_PROMISC;
2677 }
2678 }
2680 if (netdev->flags & IFF_ALLMULTI) {
2681 if (!(bnad->cfg_flags & BNAD_CF_ALLMULTI)) {
2682 new_mask |= BNA_RXMODE_ALLMULTI;
2683 valid_mask |= BNA_RXMODE_ALLMULTI;
2684 bnad->cfg_flags |= BNAD_CF_ALLMULTI;
2685 }
2686 } else {
2687 if (bnad->cfg_flags & BNAD_CF_ALLMULTI) {
2688 new_mask &= ~BNA_RXMODE_ALLMULTI;
2689 valid_mask |= BNA_RXMODE_ALLMULTI;
2690 bnad->cfg_flags &= ~BNAD_CF_ALLMULTI;
2691 }
2692 }
2694 bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL);
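/*
 * valid_mask selects which rx-mode bits are being changed; new_mask holds
 * their new values, so disabling a mode sets the bit in valid_mask only.
 */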
2696 if (!netdev_mc_empty(netdev)) {
2697 u8 *mcaddr_list;
2698 int mc_count = netdev_mc_count(netdev);
2700 /* Index 0 holds the broadcast address */
2702 mcaddr_list = kzalloc((mc_count + 1) * ETH_ALEN, GFP_ATOMIC);
2704 if (!mcaddr_list)
2705 goto unlock;
2707 memcpy(&mcaddr_list[0], &bnad_bcast_addr[0], ETH_ALEN);
2709 /* Copy rest of the MC addresses */
2710 bnad_netdev_mc_list_get(netdev, mcaddr_list);
2712 bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1, mcaddr_list, NULL);
2715 /* Should we enable BNAD_CF_ALLMULTI for err != 0 ? */
2716 kfree(mcaddr_list);
2717 }
2718 unlock:
2719 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2720 }
2722 /*
2723 * bna_lock is used to sync writes to netdev->addr
2724 * conf_lock cannot be used since this call may be made
2725 * in a non-blocking context.
2726 */
2727 static int
2728 bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
2729 {
2730 int err;
2731 struct bnad *bnad = netdev_priv(netdev);
2732 struct sockaddr *sa = (struct sockaddr *)mac_addr;
2733 unsigned long flags;
2735 spin_lock_irqsave(&bnad->bna_lock, flags);
2737 err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
2739 if (!err)
2740 memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);
2742 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2744 return err;
2745 }
2747 static int
2748 bnad_change_mtu(struct net_device *netdev, int new_mtu)
2749 {
2750 int mtu, err = 0;
2751 unsigned long flags;
2753 struct bnad *bnad = netdev_priv(netdev);
2755 if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
2756 return -EINVAL;
2758 mutex_lock(&bnad->conf_mutex);
2760 netdev->mtu = new_mtu;
2762 mtu = ETH_HLEN + new_mtu + ETH_FCS_LEN;
2764 spin_lock_irqsave(&bnad->bna_lock, flags);
2765 bna_port_mtu_set(&bnad->bna.port, mtu, NULL);
2766 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2768 mutex_unlock(&bnad->conf_mutex);
2769 return err;
2770 }
2772 static void
2773 bnad_vlan_rx_register(struct net_device *netdev,
2774 struct vlan_group *vlan_grp)
2775 {
2776 struct bnad *bnad = netdev_priv(netdev);
2778 mutex_lock(&bnad->conf_mutex);
2779 bnad->vlan_grp = vlan_grp;
2780 mutex_unlock(&bnad->conf_mutex);
2781 }
2783 static void
2784 bnad_vlan_rx_add_vid(struct net_device *netdev,
2785 unsigned short vid)
2786 {
2787 struct bnad *bnad = netdev_priv(netdev);
2788 unsigned long flags;
2790 if (!bnad->rx_info[0].rx)
2791 return;
2793 mutex_lock(&bnad->conf_mutex);
2795 spin_lock_irqsave(&bnad->bna_lock, flags);
2796 bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
2797 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2799 mutex_unlock(&bnad->conf_mutex);
2800 }
2802 static void
2803 bnad_vlan_rx_kill_vid(struct net_device *netdev,
2804 unsigned short vid)
2805 {
2806 struct bnad *bnad = netdev_priv(netdev);
2807 unsigned long flags;
2809 if (!bnad->rx_info[0].rx)
2810 return;
2812 mutex_lock(&bnad->conf_mutex);
2814 spin_lock_irqsave(&bnad->bna_lock, flags);
2815 bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
2816 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2818 mutex_unlock(&bnad->conf_mutex);
2819 }
2821 #ifdef CONFIG_NET_POLL_CONTROLLER
2822 static void
2823 bnad_netpoll(struct net_device *netdev)
2824 {
2825 struct bnad *bnad = netdev_priv(netdev);
2826 struct bnad_rx_info *rx_info;
2827 struct bnad_rx_ctrl *rx_ctrl;
2828 u32 curr_mask;
2829 int i, j;
2831 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2832 bna_intx_disable(&bnad->bna, curr_mask);
2833 bnad_isr(bnad->pcidev->irq, netdev);
2834 bna_intx_enable(&bnad->bna, curr_mask);
2835 } else {
2836 for (i = 0; i < bnad->num_rx; i++) {
2837 rx_info = &bnad->rx_info[i];
2838 if (!rx_info->rx)
2839 continue;
2840 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
2841 rx_ctrl = &rx_info->rx_ctrl[j];
2842 if (rx_ctrl->ccb) {
2843 bnad_disable_rx_irq(bnad, rx_ctrl->ccb);
2845 bnad_netif_rx_schedule_poll(bnad, rx_ctrl->ccb);
2847 }
2853 #endif
2854 static const struct net_device_ops bnad_netdev_ops = {
2855 .ndo_open = bnad_open,
2856 .ndo_stop = bnad_stop,
2857 .ndo_start_xmit = bnad_start_xmit,
2858 .ndo_get_stats64 = bnad_get_stats64,
2859 .ndo_set_rx_mode = bnad_set_rx_mode,
2860 .ndo_set_multicast_list = bnad_set_rx_mode,
2861 .ndo_validate_addr = eth_validate_addr,
2862 .ndo_set_mac_address = bnad_set_mac_address,
2863 .ndo_change_mtu = bnad_change_mtu,
2864 .ndo_vlan_rx_register = bnad_vlan_rx_register,
2865 .ndo_vlan_rx_add_vid = bnad_vlan_rx_add_vid,
2866 .ndo_vlan_rx_kill_vid = bnad_vlan_rx_kill_vid,
2867 #ifdef CONFIG_NET_POLL_CONTROLLER
2868 .ndo_poll_controller = bnad_netpoll,
2869 #endif
2870 };
2872 static void
2873 bnad_netdev_init(struct bnad *bnad, bool using_dac)
2874 {
2875 struct net_device *netdev = bnad->netdev;
2877 netdev->features |= NETIF_F_IPV6_CSUM;
2878 netdev->features |= NETIF_F_TSO;
2879 netdev->features |= NETIF_F_TSO6;
2881 netdev->features |= NETIF_F_GRO;
2882 pr_warn("bna: GRO enabled, using kernel stack GRO\n");
2884 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
2886 if (using_dac)
2887 netdev->features |= NETIF_F_HIGHDMA;
2889 netdev->features |=
2890 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
2891 NETIF_F_HW_VLAN_FILTER;
2893 netdev->vlan_features = netdev->features;
2894 netdev->mem_start = bnad->mmio_start;
2895 netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
2897 netdev->netdev_ops = &bnad_netdev_ops;
2898 bnad_set_ethtool_ops(netdev);
2899 }
2901 /*
2902 * 1. Initialize the bnad structure
2903 * 2. Setup netdev pointer in pci_dev
2904 * 3. Initialize Tx free tasklet
2905 * 4. Initialize no. of TxQ & CQs & MSIX vectors
2906 */
2907 static int
2908 bnad_init(struct bnad *bnad,
2909 struct pci_dev *pdev, struct net_device *netdev)
2910 {
2911 unsigned long flags;
2913 SET_NETDEV_DEV(netdev, &pdev->dev);
2914 pci_set_drvdata(pdev, netdev);
2916 bnad->netdev = netdev;
2917 bnad->pcidev = pdev;
2918 bnad->mmio_start = pci_resource_start(pdev, 0);
2919 bnad->mmio_len = pci_resource_len(pdev, 0);
2920 bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
2921 if (!bnad->bar0) {
2922 dev_err(&pdev->dev, "ioremap for bar0 failed\n");
2923 pci_set_drvdata(pdev, NULL);
2924 return -ENOMEM;
2925 }
2926 pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
2927 (unsigned long long) bnad->mmio_len);
2929 spin_lock_irqsave(&bnad->bna_lock, flags);
2930 if (!bnad_msix_disable)
2931 bnad->cfg_flags = BNAD_CF_MSIX;
2933 bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
2935 bnad_q_num_init(bnad);
2936 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2938 bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
2939 (bnad->num_rx * bnad->num_rxp_per_rx) +
2940 BNAD_MAILBOX_MSIX_VECTORS;
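/*
 * MSI-X budget: one vector per TxQ, one per Rx path, plus the mailbox
 * vector(s) reserved by BNAD_MAILBOX_MSIX_VECTORS.
 */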
2941 bnad->msix_diag_num = 2; /* 1 for Tx, 1 for Rx */
2943 bnad->txq_depth = BNAD_TXQ_DEPTH;
2944 bnad->rxq_depth = BNAD_RXQ_DEPTH;
2945 bnad->rx_csum = true;
2947 bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
2948 bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
2950 tasklet_init(&bnad->tx_free_tasklet, bnad_tx_free_tasklet,
2951 (unsigned long)bnad);
2953 return 0;
2954 }
2956 /*
2957 * Must be called after bnad_pci_uninit()
2958 * so that iounmap() and pci_set_drvdata(NULL)
2959 * happens only after PCI uninitialization.
2960 */
2961 static void
2962 bnad_uninit(struct bnad *bnad)
2963 {
2964 if (bnad->bar0)
2965 iounmap(bnad->bar0);
2966 pci_set_drvdata(bnad->pcidev, NULL);
2967 }
2970 /*
2971 * a) Per device mutex used for serializing configuration
2972 *    changes from OS interface
2973 * b) spin lock used to protect bna state machine
2974 */
2975 static void
2976 bnad_lock_init(struct bnad *bnad)
2977 {
2978 spin_lock_init(&bnad->bna_lock);
2979 mutex_init(&bnad->conf_mutex);
2980 }
2982 static void bnad_lock_uninit(struct bnad *bnad)
2984 {
2985 mutex_destroy(&bnad->conf_mutex);
2986 }
2988 /* PCI Initialization */
2989 static int
2990 bnad_pci_init(struct bnad *bnad,
2991 struct pci_dev *pdev, bool *using_dac)
2992 {
2993 int err;
2995 err = pci_enable_device(pdev);
2996 if (err)
2997 return err;
2998 err = pci_request_regions(pdev, BNAD_NAME);
2999 if (err)
3000 goto disable_device;
3001 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
3002 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
3003 *using_dac = 1;
3004 } else {
3005 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3006 if (err) {
3007 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
3009 if (err)
3010 goto release_regions;
3014 pci_set_master(pdev);
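/*
 * DMA setup above prefers a 64-bit mask and falls back to 32-bit; using_dac
 * is reported back so bnad_netdev_init() can set NETIF_F_HIGHDMA.
 */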
3018 pci_release_regions(pdev);
3020 pci_disable_device(pdev);
3025 static void
3026 bnad_pci_uninit(struct pci_dev *pdev)
3027 {
3028 pci_release_regions(pdev);
3029 pci_disable_device(pdev);
3030 }
3032 static int __devinit
3033 bnad_pci_probe(struct pci_dev *pdev,
3034 const struct pci_device_id *pcidev_id)
3035 {
3036 bool using_dac;
3037 int err;
3038 struct bnad *bnad;
3039 struct bna *bna;
3040 struct net_device *netdev;
3041 struct bfa_pcidev pcidev_info;
3042 unsigned long flags;
3044 pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
3045 pdev, pcidev_id, PCI_FUNC(pdev->devfn));
3047 mutex_lock(&bnad_fwimg_mutex);
3048 if (!cna_get_firmware_buf(pdev)) {
3049 mutex_unlock(&bnad_fwimg_mutex);
3050 pr_warn("Failed to load Firmware Image!\n");
3051 return -ENODEV;
3052 }
3053 mutex_unlock(&bnad_fwimg_mutex);
3055 /*
3056 * Allocates sizeof(struct net_device + struct bnad)
3057 * bnad = netdev->priv
3058 */
3059 netdev = alloc_etherdev(sizeof(struct bnad));
3060 if (!netdev) {
3061 dev_err(&pdev->dev, "alloc_etherdev failed\n");
3062 return -ENOMEM;
3064 }
3065 bnad = netdev_priv(netdev);
3067 /*
3068 * PCI initialization
3069 * Output : using_dac = 1 for 64 bit DMA
3070 *                    = 0 for 32 bit DMA
3071 */
3072 err = bnad_pci_init(bnad, pdev, &using_dac);
3073 if (err)
3074 goto free_netdev;
3076 bnad_lock_init(bnad);
3077 /*
3078 * Initialize bnad structure
3079 * Setup relation between pci_dev & netdev
3080 * Init Tx free tasklet
3081 */
3082 err = bnad_init(bnad, pdev, netdev);
3085 /* Initialize netdev structure, set up ethtool ops */
3086 bnad_netdev_init(bnad, using_dac);
3088 bnad_enable_msix(bnad);
3090 /* Get resource requirement from bna */
3091 bna_res_req(&bnad->res_info[0]);
3093 /* Allocate resources from bna */
3094 err = bnad_res_alloc(bnad);
3100 /* Setup pcidev_info for bna_init() */
3101 pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
3102 pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
3103 pcidev_info.device_id = bnad->pcidev->device;
3104 pcidev_info.pci_bar_kva = bnad->bar0;
3106 mutex_lock(&bnad->conf_mutex);
3107 bna = &bnad->bna;
3108 spin_lock_irqsave(&bnad->bna_lock, flags);
3109 bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
3111 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3113 bnad->stats.bna_stats = &bna->stats;
3116 setup_timer(&bnad->bna.device.ioc.ioc_timer, bnad_ioc_timeout,
3117 ((unsigned long)bnad));
3118 setup_timer(&bnad->bna.device.ioc.hb_timer, bnad_ioc_hb_check,
3119 ((unsigned long)bnad));
3120 setup_timer(&bnad->bna.device.ioc.sem_timer, bnad_ioc_sem_timeout,
3121 ((unsigned long)bnad));
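/*
 * Three IOC timers: ioc_timer drives the IOC state machine, hb_timer the
 * firmware heartbeat check, sem_timer the hardware-semaphore timeout; all
 * three are deleted again in the error and remove paths below.
 */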
3123 /* Now start the timer before calling IOC */
3124 mod_timer(&bnad->bna.device.ioc.ioc_timer,
3125 jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
3129 /* Don't care even if err != 0; the bna state machine will deal with it */
3132 err = bnad_device_enable(bnad);
3134 /* Get the burnt-in mac */
3135 spin_lock_irqsave(&bnad->bna_lock, flags);
3136 bna_port_mac_get(&bna->port, &bnad->perm_addr);
3137 bnad_set_netdev_perm_addr(bnad);
3138 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3140 mutex_unlock(&bnad->conf_mutex);
3142 /*
3143 * Make sure the link appears down to the stack
3144 */
3145 netif_carrier_off(netdev);
3147 /* Finally, register with net_device layer */
3148 err = register_netdev(netdev);
3149 if (err) {
3150 pr_err("BNA : Registering with netdev failed\n");
3151 goto disable_device;
3152 }
3154 return 0;
3156 disable_device:
3157 mutex_lock(&bnad->conf_mutex);
3158 bnad_device_disable(bnad);
3159 del_timer_sync(&bnad->bna.device.ioc.ioc_timer);
3160 del_timer_sync(&bnad->bna.device.ioc.sem_timer);
3161 del_timer_sync(&bnad->bna.device.ioc.hb_timer);
3162 spin_lock_irqsave(&bnad->bna_lock, flags);
3163 bna_uninit(bna);
3164 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3165 mutex_unlock(&bnad->conf_mutex);
3167 bnad_res_free(bnad);
3168 bnad_disable_msix(bnad);
3169 pci_uninit:
3170 bnad_pci_uninit(pdev);
3171 bnad_lock_uninit(bnad);
3173 free_netdev:
3174 free_netdev(netdev);
3175 return err;
3176 }
3178 static void __devexit
3179 bnad_pci_remove(struct pci_dev *pdev)
3180 {
3181 struct net_device *netdev = pci_get_drvdata(pdev);
3182 struct bnad *bnad;
3183 struct bna *bna;
3184 unsigned long flags;
3186 if (!netdev)
3187 return;
3189 pr_info("%s bnad_pci_remove\n", netdev->name);
3190 bnad = netdev_priv(netdev);
3191 bna = &bnad->bna;
3193 unregister_netdev(netdev);
3195 mutex_lock(&bnad->conf_mutex);
3196 bnad_device_disable(bnad);
3197 del_timer_sync(&bnad->bna.device.ioc.ioc_timer);
3198 del_timer_sync(&bnad->bna.device.ioc.sem_timer);
3199 del_timer_sync(&bnad->bna.device.ioc.hb_timer);
3200 spin_lock_irqsave(&bnad->bna_lock, flags);
3201 bna_uninit(bna);
3202 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3203 mutex_unlock(&bnad->conf_mutex);
3205 bnad_res_free(bnad);
3206 bnad_disable_msix(bnad);
3207 bnad_pci_uninit(pdev);
3208 bnad_lock_uninit(bnad);
3209 bnad_uninit(bnad);
3210 free_netdev(netdev);
3211 }
3213 const struct pci_device_id bnad_pci_id_table[] = {
3214 {
3215 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3216 PCI_DEVICE_ID_BROCADE_CT),
3217 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3218 .class_mask = 0xffff00
3219 },
3220 {0, },
3221 };
3222 MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
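/*
 * Exporting the id table lets udev/modprobe autoload this driver from the
 * PCI modalias of a matching Brocade CT adapter.
 */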
3224 static struct pci_driver bnad_pci_driver = {
3225 .name = BNAD_NAME,
3226 .id_table = bnad_pci_id_table,
3227 .probe = bnad_pci_probe,
3228 .remove = __devexit_p(bnad_pci_remove),
3229 };
3231 static int __init
3232 bnad_module_init(void)
3233 {
3234 int err;
3236 pr_info("Brocade 10G Ethernet driver\n");
3238 bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
3240 err = pci_register_driver(&bnad_pci_driver);
3241 if (err < 0)
3242 pr_err("bna : PCI registration failed in module init (%d)\n", err);
3247 return err;
3248 }
3250 static void __exit bnad_module_exit(void)
3252 {
3253 pci_unregister_driver(&bnad_pci_driver);
3256 release_firmware(bfi_fw);
3257 }
3259 module_init(bnad_module_init);
3260 module_exit(bnad_module_exit);
3262 MODULE_AUTHOR("Brocade");
3263 MODULE_LICENSE("GPL");
3264 MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
3265 MODULE_VERSION(BNAD_VERSION);
3266 MODULE_FIRMWARE(CNA_FW_FILE_CT);