/* bnx2x_cmn.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/etherdevice.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
#include "bnx2x_cmn.h"

#ifdef BCM_VLAN
#include <linux/if_vlan.h>
#endif

static int bnx2x_poll(struct napi_struct *napi, int budget);

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	/* prefetch skb end pointer to speed up dev_kfree_skb() */
	prefetch(&skb->end);

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
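	/* Note: the nbd field of the start BD counts every BD of the packet -
	 * the start BD itself, the parse BD, one BD per fragment and a
	 * possible TSO split-header BD; the start BD was just unmapped above,
	 * hence the -1.
	 */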
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {
		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

int bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent re-enabling the queue
		 * while it's empty. This could have happened if rx_action()
		 * gets suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}
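
/* Note: the SGE producer may only be advanced over entries the FW is done
 * with.  fp->sge_mask tracks in-use entries: bits are cleared while walking
 * the CQE's SGL, and the producer is then pushed across every fully-cleared
 * mask element, re-arming those SGEs for the FW.
 */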
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
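
/* Note: at TPA start the empty skb sitting in the TPA pool becomes the new
 * receive buffer at the producer index, while the skb that just arrived
 * (carrying the first segment of the aggregation) is parked in the pool,
 * still mapped, until the TPA stop CQE completes the aggregation.
 */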
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, DMA_FROM_DEVICE);
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef _ASM_GENERIC_INT_L64_H
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
	   fp->tpa_queue_used);
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
	   fp->tpa_queue_used);
#endif
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
						max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, DMA_FROM_DEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;
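
		/* Note: the FW rewrites the aggregated packet's IP header
		 * (its total length now spans every coalesced segment), so
		 * the IP header checksum must be recomputed before the skb
		 * is handed to the stack.
		 */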
		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_gro_receive(&fp->napi, bp->vlgrp,
						 le16_to_cpu(cqe->fast_path_cqe.
							     vlan_tag), skb);
			else
#endif
				napi_gro_receive(&fp->napi, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}

		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

/* Set Toeplitz hash value in the skb using the value from the
 * CQE (calculated by HW).
 */
static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
					struct sk_buff *skb)
{
	/* Set Toeplitz hash from CQE */
	if ((bp->dev->features & NETIF_F_RXHASH) &&
	    (cqe->fast_path_cqe.status_flags &
	     ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
		skb->rxhash =
			le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
}

int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);
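
	/* Note: each completion is either a slowpath event (ramrod
	 * completion) or a fastpath packet; a fastpath packet may open a TPA
	 * aggregation, close one, or be a plain non-TPA packet - only the
	 * latter two paths hand an skb up the stack.
	 */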
	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		/* Prefetch the page containing the BD descriptor
		   at producer's index. It will be needed when new skb is
		   allocated */
		prefetch((void *)(PAGE_ALIGN((unsigned long)
					     (&fp->rx_desc_ring[bd_prod])) -
				  PAGE_SIZE + 1));

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			prefetch(skb);
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
			     (TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);

					/* Set Toeplitz hash for an LRO skb */
					bnx2x_set_skb_rxhash(bp, cqe, skb);

					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is the size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						       len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			dma_sync_single_for_device(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
						   pad + RX_COPY_THRESH,
						   DMA_FROM_DEVICE);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring,
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (likely(bnx2x_alloc_rx_skb(bp, fp,
							     bd_prod) == 0)) {
				dma_unmap_single(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 DMA_FROM_DEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			/* Set Toeplitz hash for a non-LRO skb */
			bnx2x_set_skb_rxhash(bp, cqe, skb);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);

#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_gro_receive(&fp->napi, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
		else
#endif
			napi_gro_receive(&fp->napi, skb);

next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   fp->index, fp->sb_id);
	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
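
	/* Note: the ack above leaves this status block's interrupt disabled
	 * in the IGU; bnx2x_poll() re-enables it once all Rx/Tx work has
	 * been drained.
	 */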

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->u_status_block.status_block_index);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}

/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->flags & MF_FUNC_DIS) {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
		return;
	}

	if (bp->link_vars.link_up) {
		u16 line_speed;

		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		netdev_info(bp->dev, "NIC Link is Up, ");

		line_speed = bp->link_vars.line_speed;
		if (IS_E1HMF(bp)) {
			u16 vn_max_rate;

			vn_max_rate =
				((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
				 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
			if (vn_max_rate < line_speed)
				line_speed = vn_max_rate;
		}
		pr_cont("%d Mbps ", line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			pr_cont("full duplex");
		else
			pr_cont("half duplex");

		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				pr_cont(", receive ");
				if (bp->link_vars.flow_ctrl &
				    BNX2X_FLOW_CTRL_TX)
					pr_cont("& transmit ");
			} else {
				pr_cont(", transmit ");
			}
			pr_cont("flow control ON");
		}
		pr_cont("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
	}
}

void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
					      ETH_MAX_AGGREGATION_QUEUES_E1H;
	u16 ring_prod, cqe_ring_prod;
	int i, j;

	bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
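	/* Note: the Rx buffer must fit an MTU-sized frame plus the
	 * Ethernet/VLAN header overhead (ETH_OVREHEAD) and the HW placement
	 * alignment.
	 */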
786 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
788 if (bp->flags & TPA_ENABLE_FLAG) {
790 for_each_queue(bp, j) {
791 struct bnx2x_fastpath *fp = &bp->fp[j];
793 for (i = 0; i < max_agg_queues; i++) {
794 fp->tpa_pool[i].skb =
795 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
796 if (!fp->tpa_pool[i].skb) {
797 BNX2X_ERR("Failed to allocate TPA "
798 "skb pool for queue[%d] - "
799 "disabling TPA on this "
801 bnx2x_free_tpa_pool(bp, fp, i);
805 dma_unmap_addr_set((struct sw_rx_bd *)
806 &bp->fp->tpa_pool[i],
808 fp->tpa_state[i] = BNX2X_TPA_STOP;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;
		fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
		fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;

		/* "next page" elements initialization */
		/* SGE ring */
		for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
			struct eth_rx_sge *sge;

			sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
			sge->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
			sge->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
		}

		bnx2x_init_sge_ring_bit_mask(fp);

		/* RX BD ring */
		for (i = 1; i <= NUM_RX_RINGS; i++) {
			struct eth_rx_bd *rx_bd;

			rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
			rx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
			rx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		}

		/* CQ ring */
		for (i = 1; i <= NUM_RCQ_RINGS; i++) {
			struct eth_rx_cqe_next_page *nextpg;

			nextpg = (struct eth_rx_cqe_next_page *)
				&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
			nextpg->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
			nextpg->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		}

		/* Allocate SGEs and initialize the ring elements */
		for (i = 0, ring_prod = 0;
		     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

			if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx sges\n", i);
				BNX2X_ERR("disabling TPA for queue[%d]\n", j);
				/* Cleanup already allocated elements */
				bnx2x_free_rx_sge_range(bp, fp, ring_prod);
				bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
				fp->disable_tpa = 1;
				ring_prod = 0;
				break;
			}
			ring_prod = NEXT_SGE_IDX(ring_prod);
		}
		fp->rx_sge_prod = ring_prod;

		/* Allocate BDs and initialize BD ring */
		fp->rx_comp_cons = 0;
		cqe_ring_prod = ring_prod = 0;
		for (i = 0; i < bp->rx_ring_size; i++) {
			if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx skbs on queue[%d]\n", i, j);
				fp->eth_q_stats.rx_skb_alloc_failed++;
				break;
			}
			ring_prod = NEXT_RX_IDX(ring_prod);
			cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
			WARN_ON(ring_prod <= i);
		}

		fp->rx_bd_prod = ring_prod;
		/* must not have more available CQEs than BDs */
		fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
					 cqe_ring_prod);
		fp->rx_pkt = fp->rx_calls = 0;

		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);

		if (j != 0)
			continue;

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
		       U64_HI(fp->rx_comp_mapping));
	}
}

static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		u16 bd_cons = fp->tx_bd_cons;
		u16 sw_prod = fp->tx_pkt_prod;
		u16 sw_cons = fp->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
			sw_cons++;
		}
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int i, j;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 0; i < NUM_RX_BD; i++) {
			struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, DMA_FROM_DEVICE);

			rx_buf->skb = NULL;
			dev_kfree_skb(skb);
		}
		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
					    ETH_MAX_AGGREGATION_QUEUES_E1 :
					    ETH_MAX_AGGREGATION_QUEUES_E1H);
	}
}

void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}

static void bnx2x_free_msix_irqs(struct bnx2x *bp)
{
	int i, offset = 1;

	free_irq(bp->msix_table[0].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[0].vector);

#ifdef BCM_CNIC
	offset++;
#endif
	for_each_queue(bp, i) {
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
		   "state %x\n", i, bp->msix_table[i + offset].vector,
		   bnx2x_fp(bp, i, state));

		free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
	}
}

void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
{
	if (bp->flags & USING_MSIX_FLAG) {
		if (!disable_only)
			bnx2x_free_msix_irqs(bp);
		pci_disable_msix(bp->pdev);
		bp->flags &= ~USING_MSIX_FLAG;

	} else if (bp->flags & USING_MSI_FLAG) {
		if (!disable_only)
			free_irq(bp->pdev->irq, bp->dev);
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;

	} else if (!disable_only)
		free_irq(bp->pdev->irq, bp->dev);
}
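
/* Note: MSI-X vector layout is slowpath in entry 0, an optional CNIC vector
 * next, then one vector per fastpath queue - hence the [i + offset]
 * indexing used when requesting and freeing the fastpath IRQs.
 */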
static int bnx2x_enable_msix(struct bnx2x *bp)
{
	int i, rc, offset = 1;
	int igu_vec = 0;

	bp->msix_table[0].entry = igu_vec;
	DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);

#ifdef BCM_CNIC
	igu_vec = BP_L_ID(bp) + offset;
	bp->msix_table[1].entry = igu_vec;
	DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
	offset++;
#endif
	for_each_queue(bp, i) {
		igu_vec = BP_L_ID(bp) + offset + i;
		bp->msix_table[i + offset].entry = igu_vec;
		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
		   "(fastpath #%u)\n", i + offset, igu_vec, i);
	}

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
			     BNX2X_NUM_QUEUES(bp) + offset);

	/*
	 * reconfigure number of tx/rx queues according to available
	 * MSI-X vectors
	 */
	if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
		/* vectors available for FP */
		int fp_vec = rc - BNX2X_MSIX_VEC_FP_START;

		DP(NETIF_MSG_IFUP,
		   "Trying to use less MSI-X vectors: %d\n", rc);

		rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);

		if (rc) {
			DP(NETIF_MSG_IFUP,
			   "MSI-X is not attainable rc %d\n", rc);
			return rc;
		}

		bp->num_queues = min(bp->num_queues, fp_vec);

		DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
		   bp->num_queues);
	} else if (rc) {
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
		return rc;
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;
}

static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 1;

	rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}

#ifdef BCM_CNIC
	offset++;
#endif
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
			 bp->dev->name, i);

		rc = request_irq(bp->msix_table[i + offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
			bnx2x_free_msix_irqs(bp);
			return -EBUSY;
		}

		fp->state = BNX2X_FP_STATE_IRQ;
	}

	i = BNX2X_NUM_QUEUES(bp);
	netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
	       " ... fp[%d] %d\n",
	       bp->msix_table[0].vector,
	       0, bp->msix_table[offset].vector,
	       i - 1, bp->msix_table[offset + i - 1].vector);

	return 0;
}

static int bnx2x_enable_msi(struct bnx2x *bp)
{
	int rc;

	rc = pci_enable_msi(bp->pdev);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSI_FLAG;

	return 0;
}

static int bnx2x_req_irq(struct bnx2x *bp)
{
	unsigned long flags;
	int rc;

	if (bp->flags & USING_MSI_FLAG)
		flags = 0;
	else
		flags = IRQF_SHARED;

	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
			 bp->dev->name, bp->dev);
	if (!rc)
		bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;

	return rc;
}

static void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
}

void bnx2x_netif_start(struct bnx2x *bp)
{
	int intr_sem;

	intr_sem = atomic_dec_and_test(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (intr_sem) {
		if (netif_running(bp->dev)) {
			bnx2x_napi_enable(bp);
			bnx2x_int_enable(bp);
			if (bp->state == BNX2X_STATE_OPEN)
				netif_tx_wake_all_queues(bp->dev);
		}
	}
}

void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	netif_tx_disable(bp->dev);
}

static int bnx2x_set_num_queues(struct bnx2x *bp)
{
	int rc = 0;

	switch (bp->int_mode) {
	case INT_MODE_INTx:
	case INT_MODE_MSI:
		bp->num_queues = 1;
		DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
		break;
	default:
		/* Set number of queues according to bp->multi_mode value */
		bnx2x_set_num_queues_msix(bp);

		DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
		   bp->num_queues);

		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fallback to MSI or legacy INTx with one fp
		 */
		rc = bnx2x_enable_msix(bp);
		if (rc)
			/* failed to enable MSI-X */
			bp->num_queues = 1;
		break;
	}
	bp->dev->real_num_tx_queues = bp->num_queues;
	return rc;
}

/* must be called with rtnl_lock */
int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	rc = bnx2x_set_num_queues(bp);

	if (bnx2x_alloc_mem(bp)) {
		bnx2x_free_irq(bp, true);
		return -ENOMEM;
	}

	for_each_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

	for_each_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, 128);

	bnx2x_napi_enable(bp);

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc) {
			bnx2x_free_irq(bp, true);
			goto load_error1;
		}
	} else {
		/* Fall to INTx if failed to enable MSI-X due to lack of
		   memory (in bnx2x_set_num_queues()) */
		if ((rc != -ENOMEM) && (bp->int_mode != INT_MODE_INTx))
			bnx2x_enable_msi(bp);
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
			bnx2x_free_irq(bp, true);
			goto load_error1;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			netdev_info(bp->dev, "using MSI IRQ %d\n",
				    bp->pdev->irq);
		}
	}

	/* Send LOAD_REQUEST command to MCP
	   Returns the type of LOAD command:
	   if it is the first port to be initialized
	   common blocks should be initialized, otherwise - not
	*/
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error2;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			goto load_error2;
		}

	} else {
		int port = BP_PORT(bp);

		DP(NETIF_MSG_IFUP, "NO MCP - load counts      %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]++;
		load_count[1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP - new load counts  %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
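		/* Note: without an MCP, load_count[0] tracks all loaded
		 * functions and load_count[1 + port] those on this port:
		 * the first function overall performs COMMON init, the
		 * first on a port PORT init, and any other FUNCTION init
		 * only.
		 */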
		if (load_count[0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
		goto load_error2;
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
	    (bp->common.shmem2_base))
		SHMEM2_WR(bp, dcc_support,
			  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
			   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error3;
		}
	}

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
#ifndef BNX2X_STOP_ON_ERROR
		goto load_error3;
#else
		bp->panic = 1;
		goto load_error4;
#endif
	}

	if (CHIP_IS_E1H(bp))
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
			bp->flags |= MF_FUNC_DIS;
		}

	if (bp->state == BNX2X_STATE_OPEN) {
#ifdef BCM_CNIC
		/* Enable Timer scan */
		REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
#endif
		for_each_nondefault_queue(bp, i) {
			rc = bnx2x_setup_multi(bp, i);
			if (rc)
#ifdef BCM_CNIC
				goto load_error4;
#else
				goto load_error3;
#endif
		}

		if (CHIP_IS_E1(bp))
			bnx2x_set_eth_mac_addr_e1(bp, 1);
		else
			bnx2x_set_eth_mac_addr_e1h(bp, 1);
#ifdef BCM_CNIC
		/* Set iSCSI L2 MAC */
		mutex_lock(&bp->cnic_mutex);
		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
			bnx2x_set_iscsi_eth_mac_addr(bp, 1);
			bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
			bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
				      CNIC_SB_ID(bp));
		}
		mutex_unlock(&bp->cnic_mutex);
#endif
	}

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp, load_mode);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		if (bp->state == BNX2X_STATE_OPEN) {
			/* Tx queue should be only re-enabled */
			netif_tx_wake_all_queues(bp->dev);
		}
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		if (bp->state != BNX2X_STATE_OPEN)
			netif_tx_disable(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_DIAG:
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

#ifdef BCM_CNIC
	bnx2x_setup_cnic_irq_info(bp);
	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
#endif
	bnx2x_inc_load_cnt(bp);

	return 0;

#ifdef BCM_CNIC
load_error4:
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
#endif
load_error3:
	bnx2x_int_disable_sync(bp, 1);
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	}
	bp->port.pmf = 0;
	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error2:
	/* Release IRQs */
	bnx2x_free_irq(bp, false);
load_error1:
	bnx2x_napi_disable(bp);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	return rc;
}

/* must be called with rtnl_lock */
int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int i;

	if (bp->state == BNX2X_STATE_CLOSED) {
		/* Interface has been removed - nothing to recover */
		bp->recovery_state = BNX2X_RECOVERY_DONE;
		bp->is_leader = 0;
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
		smp_wmb();

		return -EINVAL;
	}

#ifdef BCM_CNIC
	bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
#endif
	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	/* Set "drop all" */
	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	/* Disable HW interrupts, NAPI and Tx */
	bnx2x_netif_stop(bp, 1);
	netif_carrier_off(bp->dev);

	del_timer_sync(&bp->timer);
	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Release IRQs */
	bnx2x_free_irq(bp, false);

	/* Cleanup the chip if needed */
	if (unload_mode != UNLOAD_RECOVERY)
		bnx2x_chip_cleanup(bp, unload_mode);

	bp->port.pmf = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	/* The last driver must disable a "close the gate" if there is no
	 * parity attention or "process kill" pending.
	 */
	if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
	    bnx2x_reset_is_done(bp))
		bnx2x_disable_close_the_gate(bp);

	/* Reset MCP mail box sequence if there is ongoing recovery */
	if (unload_mode == UNLOAD_RECOVERY)
		bp->fw_seq = 0;

	return 0;
}

int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		/* If there are other clients above don't
		   shut down the power */
		if (atomic_read(&bp->pdev->enable_cnt) != 1)
			return 0;
		/* Don't shut down the power for emulation and FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			return 0;

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}

/*
 * net_device service functions
 */
static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;

	while (1) {
#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic)) {
			napi_complete(napi);
			return 0;
		}
#endif

		if (bnx2x_has_tx_work(fp))
			bnx2x_tx_int(fp);

		if (bnx2x_has_rx_work(fp)) {
			work_done += bnx2x_rx_int(fp, budget - work_done);

			/* must not complete if we consumed full budget */
			if (work_done >= budget)
				break;
		}

		/* Fall out from the NAPI loop if needed */
		if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
			bnx2x_update_fpsb_idx(fp);
			/* bnx2x_has_rx_work() reads the status block, thus we
			 * need to ensure that status block indices have been
			 * actually read (bnx2x_update_fpsb_idx) prior to this
			 * check (bnx2x_has_rx_work) so that we won't write the
			 * "newer" value of the status block to IGU (if there
			 * was a DMA right after bnx2x_has_rx_work and if there
			 * is no rmb, the memory reading (bnx2x_update_fpsb_idx)
			 * may be postponed to right before bnx2x_ack_sb). In
			 * this case there will never be another interrupt
			 * until there is another update of the status block,
			 * while there is still unhandled work.
			 */
			rmb();

			if (!(bnx2x_has_rx_work(fp) ||
			      bnx2x_has_tx_work(fp))) {
				napi_complete(napi);
				/* Re-enable interrupts */
				bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
					     le16_to_cpu(fp->fp_c_idx),
					     IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
					     le16_to_cpu(fp->fp_u_idx),
					     IGU_INT_ENABLE, 1);
				break;
			}
		}
	}

	return work_done;
}

/* we split the first BD into headers and data BDs
 * to ease the pain of our fellow microcode engineers
 * we use one mapping for both BDs
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct sw_tx_bd *tx_buf,
				   struct eth_tx_start_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_start_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);

	/* this marks the BD as one that has no individual mapping */
	tx_buf->flags |= BNX2X_TSO_SPLIT_BD;

	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd */
	*tx_bd = (struct eth_tx_start_bd *)d_tx_bd;

	return bd_prod;
}

static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	if (fix > 0)
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
}
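
/* Note: bnx2x_xmit_type() returns a bitmask of the offloads a packet needs:
 * XMIT_CSUM_V4/XMIT_CSUM_V6 (optionally with XMIT_CSUM_TCP) for checksum
 * offload and XMIT_GSO_V4/XMIT_GSO_V6 for TSO, or XMIT_PLAIN for none.
 */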
static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
	u32 rc;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		rc = XMIT_PLAIN;

	else {
		if (skb->protocol == htons(ETH_P_IPV6)) {
			rc = XMIT_CSUM_V6;
			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;

		} else {
			rc = XMIT_CSUM_V4;
			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		}
	}

	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);

	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);

	return rc;
}

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* check if packet requires linearization (packet is too fragmented)
   no need to check fragmentation if page size > 8K (there will be no
   violation to FW restrictions) */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
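			/* Note: the FW cannot fetch more than wnd_size BDs
			 * per MSS of payload, so every window of wnd_size
			 * consecutive frags must carry at least lso_mss
			 * bytes; the sliding-window sums below verify that.
			 */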
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB*/
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_shinfo(skb)->frags[frag_idx].size;

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				wnd_sum +=
			  skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_shinfo(skb)->frags[wnd_idx].size;
			}
		} else {
			/* in non-LSO too fragmented packet should always
			   be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. "
		   "num_frags %d hlen %d first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
#endif

/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_fastpath *fp;
	struct netdev_queue *txq;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
	struct eth_tx_parse_bd *pbd = NULL;
	u16 pkt_prod, bd_prod;
	int nbd, fp_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int i;
	u8 hlen = 0;
	__le16 pkt_size = 0;
	struct ethhdr *eth;
	u8 mac_type = UNICAST_ADDRESS;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	fp_index = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, fp_index);

	fp = &bp->fp[fp_index];

	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
		fp->eth_q_stats.driver_xoff++;
		netif_tx_stop_queue(txq);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
	   " gso type %x xmit_type %x\n",
	   skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);

	eth = (struct ethhdr *)skb->data;

	/* set flag according to packet type (UNICAST_ADDRESS is default) */
	if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
		if (is_broadcast_ether_addr(eth->h_dest))
			mac_type = BROADCAST_ADDRESS;
		else
			mac_type = MULTICAST_ADDRESS;
	}

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
	/* First, check if we need to linearize the skb (due to FW
	   restrictions). No need to check fragmentation if page size > 8K
	   (there will be no violation to FW restrictions) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
			   "silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

	/*
	Please read carefully. First we use one BD which we mark as start,
	then we have a parsing info BD (used for TSO or xsum),
	and only then we have the rest of the TSO BDs.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	And above all, all pdb sizes are in words - NOT DWORDS!
	*/

	pkt_prod = fp->tx_pkt_prod++;
	bd_prod = TX_BD(fp->tx_bd_prod);

	/* get a tx_buf and first BD */
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;

	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_start_bd->general_data = (mac_type <<
					ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
	/* header nbd */
	tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);

	/* remember the first BD of the packet */
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p next_idx %u bd %u @%p\n",
	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);

#ifdef BCM_VLAN
	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
	    (bp->flags & HW_VLAN_TX_FLAG)) {
		tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
	} else
#endif
		tx_start_bd->vlan = cpu_to_le16(pkt_prod);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	if (xmit_type & XMIT_CSUM) {
		hlen = (skb_network_header(skb) - skb->data) / 2;

		/* for now NS flag is not used in Linux */
		pbd->global_data =
			(hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
				 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));

		pbd->ip_hlen = (skb_transport_header(skb) -
				skb_network_header(skb)) / 2;

		hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;

		pbd->total_hlen = cpu_to_le16(hlen);
		hlen = hlen*2;

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;

		if (xmit_type & XMIT_CSUM_V4)
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IP_CSUM;
		else
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IPV6;

		if (xmit_type & XMIT_CSUM_TCP) {
			pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

		} else {
			s8 fix = SKB_CS_OFF(skb); /* signed! */

			pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;

			DP(NETIF_MSG_TX_QUEUED,
			   "hlen %d fix %d csum before fix %x\n",
			   le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));

			/* HW bug: fixup the CSUM */
			pbd->tcp_pseudo_csum =
				bnx2x_csum_fix(skb_transport_header(skb),
					       SKB_CS(skb), fix);

			DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
			   pbd->tcp_pseudo_csum);
		}
	}

	mapping = dma_map_single(&bp->pdev->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);

	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
	tx_start_bd->nbd = cpu_to_le16(nbd);
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	pkt_size = tx_start_bd->nbytes;

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
	   " nbytes %d flags %x vlan %x\n",
	   tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
	   le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
	   tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));

	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d hlen %d total len %d tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
						 hlen, bd_prod, ++nbd);

		pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
		pbd->tcp_flags = pbd_tcp_flags(skb);

		if (xmit_type & XMIT_GSO_V4) {
			pbd->ip_id = swab16(ip_hdr(skb)->id);
			pbd->tcp_pseudo_csum =
				swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
							  ip_hdr(skb)->daddr,
							  0, IPPROTO_TCP, 0));

		} else
			pbd->tcp_pseudo_csum =
				swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							&ipv6_hdr(skb)->daddr,
							0, IPPROTO_TCP, 0));

		pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
	}

	tx_data_bd = (struct eth_tx_bd *)tx_start_bd;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
		if (total_pkt_bd == NULL)
			total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

		mapping = dma_map_page(&bp->pdev->dev, frag->page,
				       frag->page_offset,
				       frag->size, DMA_TO_DEVICE);

		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_data_bd->nbytes = cpu_to_le16(frag->size);
		le16_add_cpu(&pkt_size, frag->size);

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d bd @%p addr (%x:%x) nbytes %d\n",
		   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
		   le16_to_cpu(tx_data_bd->nbytes));
	}

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	if (total_pkt_bd != NULL)
		total_pkt_bd->total_pkt_bytes = pkt_size;

	if (pbd)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
		   " tcp_flags %x xsum %x seq %u hlen %u\n",
		   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
		   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
		   pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));

	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);

	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW will
	 * assume packets must have BDs.
	 */
	wmb();

	fp->tx_db.data.prod += nbd;
	barrier();
	DOORBELL(bp, fp->index, fp->tx_db.raw);

	mmiowb();

	fp->tx_bd_prod += nbd;
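
	/* Note: the queue is stopped while fewer than MAX_SKB_FRAGS + 3 BDs
	 * remain - a worst-case packet needs one BD per fragment plus the
	 * start BD, the parse BD and a possible TSO split-header BD.
	 */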
	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		netif_tx_stop_queue(txq);

		/* paired memory barrier is in bnx2x_tx_int(), we have to keep
		 * ordering of set_bit() in netif_tx_stop_queue() and read of
		 * fp->tx_bd_cons */
		smp_mb();

		fp->eth_q_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);
	}
	fp->tx_pkt++;

	return NETDEV_TX_OK;
}

/* called with rtnl_lock */
int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev)) {
		if (CHIP_IS_E1(bp))
			bnx2x_set_eth_mac_addr_e1(bp, 1);
		else
			bnx2x_set_eth_mac_addr_e1h(bp, 1);
	}

	return 0;
}

/* called with rtnl_lock */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_delayed_work(&bp->reset_task, 0);
}

#ifdef BCM_VLAN
/* called with rtnl_lock */
void bnx2x_vlan_rx_register(struct net_device *dev,
			    struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->vlgrp = vlgrp;

	/* Set flags according to the required capabilities */
	bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	if (dev->features & NETIF_F_HW_VLAN_TX)
		bp->flags |= HW_VLAN_TX_FLAG;

	if (dev->features & NETIF_F_HW_VLAN_RX)
		bp->flags |= HW_VLAN_RX_FLAG;

	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}
#endif

int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}