1 /* bnx2.c: Broadcom NX2 network driver.
3 * Copyright (c) 2004-2008 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Written by: Michael Chan (mchan@broadcom.com)
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/bitops.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
55 #define FW_BUF_SIZE 0x10000
57 #define DRV_MODULE_NAME "bnx2"
58 #define PFX DRV_MODULE_NAME ": "
59 #define DRV_MODULE_VERSION "1.7.8"
60 #define DRV_MODULE_RELDATE "July 10, 2008"
62 #define RUN_AT(x) (jiffies + (x))
64 /* Time in jiffies before concluding the transmitter is hung. */
65 #define TX_TIMEOUT (5*HZ)
67 static char version[] __devinitdata =
68 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
70 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
71 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709 Driver");
72 MODULE_LICENSE("GPL");
73 MODULE_VERSION(DRV_MODULE_VERSION);
75 static int disable_msi = 0;
77 module_param(disable_msi, int, 0);
78 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
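/* Example usage (illustrative, not part of the original source): MSI can
 * be turned off at module load time with
 *
 *	modprobe bnx2 disable_msi=1
 */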
93 /* indexed by board_t, above */
96 } board_info[] __devinitdata = {
97 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
98 { "HP NC370T Multifunction Gigabit Server Adapter" },
99 { "HP NC370i Multifunction Gigabit Server Adapter" },
100 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
101 { "HP NC370F Multifunction Gigabit Server Adapter" },
102 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
103 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
104 { "Broadcom NetXtreme II BCM5709 1000Base-T" },
105 { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
106 { "Broadcom NetXtreme II BCM5716 1000Base-T" },
109 static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
110 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
111 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
112 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
113 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
114 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
115 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
116 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
117 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
118 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
119 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
120 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
121 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
122 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
123 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
124 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
125 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
126 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
127 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
128 { PCI_VENDOR_ID_BROADCOM, 0x163b,
129 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
133 static struct flash_spec flash_table[] =
135 #define BUFFERED_FLAGS (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
136 #define NONBUFFERED_FLAGS (BNX2_NV_WREN)
138 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
139 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
140 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
142 /* Expansion entry 0001 */
143 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
144 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
145 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
147 /* Saifun SA25F010 (non-buffered flash) */
148 /* strap, cfg1, & write1 need updates */
149 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
150 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
151 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
152 "Non-buffered flash (128kB)"},
153 /* Saifun SA25F020 (non-buffered flash) */
154 /* strap, cfg1, & write1 need updates */
155 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
156 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
157 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
158 "Non-buffered flash (256kB)"},
159 /* Expansion entry 0100 */
160 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
161 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
162 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
164 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
165 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
166 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
167 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
168 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
169 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
170 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
171 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
172 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
173 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
174 /* Saifun SA25F005 (non-buffered flash) */
175 /* strap, cfg1, & write1 need updates */
176 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
177 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
178 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
179 "Non-buffered flash (64kB)"},
181 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
182 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
183 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
185 /* Expansion entry 1001 */
186 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
187 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
188 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
190 /* Expansion entry 1010 */
191 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
192 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
193 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
195 /* ATMEL AT45DB011B (buffered flash) */
196 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
197 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
198 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
199 "Buffered flash (128kB)"},
200 /* Expansion entry 1100 */
201 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
202 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
203 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
205 /* Expansion entry 1101 */
206 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
207 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
208 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
210 /* Atmel Expansion entry 1110 */
211 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
212 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
213 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
214 "Entry 1110 (Atmel)"},
215 /* ATMEL AT45DB021B (buffered flash) */
216 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
217 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
218 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
219 "Buffered flash (256kB)"},
222 static struct flash_spec flash_5709 = {
223 .flags = BNX2_NV_BUFFERED,
224 .page_bits = BCM5709_FLASH_PAGE_BITS,
225 .page_size = BCM5709_FLASH_PAGE_SIZE,
226 .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK,
227 .total_size = BUFFERED_FLASH_TOTAL_SIZE*2,
228 .name = "5709 Buffered flash (256kB)",
231 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
233 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
239 /* The ring uses 256 indices for 255 entries; one of them
240 * needs to be skipped.
242 diff = txr->tx_prod - txr->tx_cons;
243 if (unlikely(diff >= TX_DESC_CNT)) {
245 if (diff == TX_DESC_CNT)
246 diff = MAX_TX_DESC_CNT;
248 return (bp->tx_ring_size - diff);
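/* Illustrative note: the producer and consumer indices are free-running,
 * so their difference can reach TX_DESC_CNT (256) on a full ring even
 * though only MAX_TX_DESC_CNT (255) entries are usable; the clamp above
 * keeps bp->tx_ring_size - diff from going negative.
 */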
252 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
256 spin_lock_bh(&bp->indirect_lock);
257 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
258 val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
259 spin_unlock_bh(&bp->indirect_lock);
264 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
266 spin_lock_bh(&bp->indirect_lock);
267 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
268 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
269 spin_unlock_bh(&bp->indirect_lock);
273 bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
275 bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
279 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
281 return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
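/* Usage sketch (illustrative): shared-memory words such as
 * BNX2_LINK_STATUS are accessed through this window, e.g.
 *
 *	u32 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
 *
 * which programs BNX2_PCICFG_REG_WINDOW_ADDRESS with
 * shmem_base + offset and then reads BNX2_PCICFG_REG_WINDOW,
 * all under indirect_lock.
 */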
285 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
288 spin_lock_bh(&bp->indirect_lock);
289 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
292 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
293 REG_WR(bp, BNX2_CTX_CTX_CTRL,
294 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
295 for (i = 0; i < 5; i++) {
297 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
298 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
303 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
304 REG_WR(bp, BNX2_CTX_DATA, val);
306 spin_unlock_bh(&bp->indirect_lock);
310 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
315 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
316 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
317 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
319 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
320 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
325 val1 = (bp->phy_addr << 21) | (reg << 16) |
326 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
327 BNX2_EMAC_MDIO_COMM_START_BUSY;
328 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
330 for (i = 0; i < 50; i++) {
333 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
334 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
337 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
338 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
344 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
353 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
354 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
355 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
357 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
358 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
367 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
372 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
373 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
374 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
376 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
377 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
382 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
383 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
384 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
385 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
387 for (i = 0; i < 50; i++) {
390 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
391 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
397 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
402 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
403 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
404 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
406 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
407 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
416 bnx2_disable_int(struct bnx2 *bp)
419 struct bnx2_napi *bnapi;
421 for (i = 0; i < bp->irq_nvecs; i++) {
422 bnapi = &bp->bnx2_napi[i];
423 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
424 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
426 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
430 bnx2_enable_int(struct bnx2 *bp)
433 struct bnx2_napi *bnapi;
435 for (i = 0; i < bp->irq_nvecs; i++) {
436 bnapi = &bp->bnx2_napi[i];
438 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
439 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
440 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
441 bnapi->last_status_idx);
443 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
444 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
445 bnapi->last_status_idx);
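/* Force an immediate status block update/interrupt so that any events
 * that arrived while interrupts were masked are not left unserviced.
 */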
447 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
451 bnx2_disable_int_sync(struct bnx2 *bp)
455 atomic_inc(&bp->intr_sem);
456 bnx2_disable_int(bp);
457 for (i = 0; i < bp->irq_nvecs; i++)
458 synchronize_irq(bp->irq_tbl[i].vector);
462 bnx2_napi_disable(struct bnx2 *bp)
466 for (i = 0; i < bp->irq_nvecs; i++)
467 napi_disable(&bp->bnx2_napi[i].napi);
471 bnx2_napi_enable(struct bnx2 *bp)
475 for (i = 0; i < bp->irq_nvecs; i++)
476 napi_enable(&bp->bnx2_napi[i].napi);
480 bnx2_netif_stop(struct bnx2 *bp)
482 bnx2_disable_int_sync(bp);
483 if (netif_running(bp->dev)) {
484 bnx2_napi_disable(bp);
485 netif_tx_disable(bp->dev);
486 bp->dev->trans_start = jiffies; /* prevent tx timeout */
491 bnx2_netif_start(struct bnx2 *bp)
493 if (atomic_dec_and_test(&bp->intr_sem)) {
494 if (netif_running(bp->dev)) {
495 netif_wake_queue(bp->dev);
496 bnx2_napi_enable(bp);
503 bnx2_free_tx_mem(struct bnx2 *bp)
507 for (i = 0; i < bp->num_tx_rings; i++) {
508 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
509 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
511 if (txr->tx_desc_ring) {
512 pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
514 txr->tx_desc_mapping);
515 txr->tx_desc_ring = NULL;
517 kfree(txr->tx_buf_ring);
518 txr->tx_buf_ring = NULL;
523 bnx2_free_rx_mem(struct bnx2 *bp)
527 for (i = 0; i < bp->num_rx_rings; i++) {
528 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
529 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
532 for (j = 0; j < bp->rx_max_ring; j++) {
533 if (rxr->rx_desc_ring[j])
534 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
535 rxr->rx_desc_ring[j],
536 rxr->rx_desc_mapping[j]);
537 rxr->rx_desc_ring[j] = NULL;
539 if (rxr->rx_buf_ring)
540 vfree(rxr->rx_buf_ring);
541 rxr->rx_buf_ring = NULL;
543 for (j = 0; j < bp->rx_max_pg_ring; j++) {
544 if (rxr->rx_pg_desc_ring[j])
545 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
546 rxr->rx_pg_desc_ring[j],
547 rxr->rx_pg_desc_mapping[j]);
548 rxr->rx_pg_desc_ring[j] = NULL;
551 vfree(rxr->rx_pg_ring);
552 rxr->rx_pg_ring = NULL;
557 bnx2_alloc_tx_mem(struct bnx2 *bp)
561 for (i = 0; i < bp->num_tx_rings; i++) {
562 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
563 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
565 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
566 if (txr->tx_buf_ring == NULL)
570 pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
571 &txr->tx_desc_mapping);
572 if (txr->tx_desc_ring == NULL)
579 bnx2_alloc_rx_mem(struct bnx2 *bp)
583 for (i = 0; i < bp->num_rx_rings; i++) {
584 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
585 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
589 vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
590 if (rxr->rx_buf_ring == NULL)
593 memset(rxr->rx_buf_ring, 0,
594 SW_RXBD_RING_SIZE * bp->rx_max_ring);
596 for (j = 0; j < bp->rx_max_ring; j++) {
597 rxr->rx_desc_ring[j] =
598 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
599 &rxr->rx_desc_mapping[j]);
600 if (rxr->rx_desc_ring[j] == NULL)
605 if (bp->rx_pg_ring_size) {
606 rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
608 if (rxr->rx_pg_ring == NULL)
611 memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
615 for (j = 0; j < bp->rx_max_pg_ring; j++) {
616 rxr->rx_pg_desc_ring[j] =
617 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
618 &rxr->rx_pg_desc_mapping[j]);
619 if (rxr->rx_pg_desc_ring[j] == NULL)
628 bnx2_free_mem(struct bnx2 *bp)
631 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
633 bnx2_free_tx_mem(bp);
634 bnx2_free_rx_mem(bp);
636 for (i = 0; i < bp->ctx_pages; i++) {
637 if (bp->ctx_blk[i]) {
638 pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
640 bp->ctx_blk_mapping[i]);
641 bp->ctx_blk[i] = NULL;
644 if (bnapi->status_blk.msi) {
645 pci_free_consistent(bp->pdev, bp->status_stats_size,
646 bnapi->status_blk.msi,
647 bp->status_blk_mapping);
648 bnapi->status_blk.msi = NULL;
649 bp->stats_blk = NULL;
654 bnx2_alloc_mem(struct bnx2 *bp)
656 int i, status_blk_size, err;
657 struct bnx2_napi *bnapi;
660 /* Combine status and statistics blocks into one allocation. */
661 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
662 if (bp->flags & BNX2_FLAG_MSIX_CAP)
663 status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
664 BNX2_SBLK_MSIX_ALIGN_SIZE);
665 bp->status_stats_size = status_blk_size +
666 sizeof(struct statistics_block);
668 status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
669 &bp->status_blk_mapping);
670 if (status_blk == NULL)
673 memset(status_blk, 0, bp->status_stats_size);
675 bnapi = &bp->bnx2_napi[0];
676 bnapi->status_blk.msi = status_blk;
677 bnapi->hw_tx_cons_ptr =
678 &bnapi->status_blk.msi->status_tx_quick_consumer_index0;
679 bnapi->hw_rx_cons_ptr =
680 &bnapi->status_blk.msi->status_rx_quick_consumer_index0;
681 if (bp->flags & BNX2_FLAG_MSIX_CAP) {
682 for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
683 struct status_block_msix *sblk;
685 bnapi = &bp->bnx2_napi[i];
687 sblk = (void *) (status_blk +
688 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
689 bnapi->status_blk.msix = sblk;
690 bnapi->hw_tx_cons_ptr =
691 &sblk->status_tx_quick_consumer_index;
692 bnapi->hw_rx_cons_ptr =
693 &sblk->status_rx_quick_consumer_index;
694 bnapi->int_num = i << 24;
698 bp->stats_blk = status_blk + status_blk_size;
700 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
702 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
703 bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
704 if (bp->ctx_pages == 0)
706 for (i = 0; i < bp->ctx_pages; i++) {
707 bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
709 &bp->ctx_blk_mapping[i]);
710 if (bp->ctx_blk[i] == NULL)
715 err = bnx2_alloc_rx_mem(bp);
719 err = bnx2_alloc_tx_mem(bp);
731 bnx2_report_fw_link(struct bnx2 *bp)
733 u32 fw_link_status = 0;
735 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
741 switch (bp->line_speed) {
743 if (bp->duplex == DUPLEX_HALF)
744 fw_link_status = BNX2_LINK_STATUS_10HALF;
746 fw_link_status = BNX2_LINK_STATUS_10FULL;
749 if (bp->duplex == DUPLEX_HALF)
750 fw_link_status = BNX2_LINK_STATUS_100HALF;
752 fw_link_status = BNX2_LINK_STATUS_100FULL;
755 if (bp->duplex == DUPLEX_HALF)
756 fw_link_status = BNX2_LINK_STATUS_1000HALF;
758 fw_link_status = BNX2_LINK_STATUS_1000FULL;
761 if (bp->duplex == DUPLEX_HALF)
762 fw_link_status = BNX2_LINK_STATUS_2500HALF;
764 fw_link_status = BNX2_LINK_STATUS_2500FULL;
768 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
771 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
773 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
774 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
776 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
777 bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
778 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
780 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
784 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
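/* Publish the resolved link state to the firmware via shared memory so
 * that the bootcode sees the same speed/duplex/flow-control result as
 * the driver.
 */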
786 bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
790 bnx2_xceiver_str(struct bnx2 *bp)
792 return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
793 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
798 bnx2_report_link(struct bnx2 *bp)
801 netif_carrier_on(bp->dev);
802 printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
803 bnx2_xceiver_str(bp));
805 printk("%d Mbps ", bp->line_speed);
807 if (bp->duplex == DUPLEX_FULL)
808 printk("full duplex");
810 printk("half duplex");
813 if (bp->flow_ctrl & FLOW_CTRL_RX) {
814 printk(", receive ");
815 if (bp->flow_ctrl & FLOW_CTRL_TX)
816 printk("& transmit ");
819 printk(", transmit ");
821 printk("flow control ON");
826 netif_carrier_off(bp->dev);
827 printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
828 bnx2_xceiver_str(bp));
831 bnx2_report_fw_link(bp);
835 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
837 u32 local_adv, remote_adv;
840 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
841 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
843 if (bp->duplex == DUPLEX_FULL) {
844 bp->flow_ctrl = bp->req_flow_ctrl;
849 if (bp->duplex != DUPLEX_FULL) {
853 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
854 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
857 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
858 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
859 bp->flow_ctrl |= FLOW_CTRL_TX;
860 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
861 bp->flow_ctrl |= FLOW_CTRL_RX;
865 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
866 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
868 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
869 u32 new_local_adv = 0;
870 u32 new_remote_adv = 0;
872 if (local_adv & ADVERTISE_1000XPAUSE)
873 new_local_adv |= ADVERTISE_PAUSE_CAP;
874 if (local_adv & ADVERTISE_1000XPSE_ASYM)
875 new_local_adv |= ADVERTISE_PAUSE_ASYM;
876 if (remote_adv & ADVERTISE_1000XPAUSE)
877 new_remote_adv |= ADVERTISE_PAUSE_CAP;
878 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
879 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
881 local_adv = new_local_adv;
882 remote_adv = new_remote_adv;
885 /* See Table 28B-3 of 802.3ab-1999 spec. */
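/* Summary of the resolution implemented below:
 *   local PAUSE_CAP,        remote PAUSE_CAP          -> TX + RX pause
 *   local PAUSE_CAP + ASYM, remote ASYM only          -> RX pause only
 *   local ASYM only,        remote PAUSE_CAP + ASYM   -> TX pause only
 * Any other combination leaves flow control disabled.
 */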
886 if (local_adv & ADVERTISE_PAUSE_CAP) {
887 if(local_adv & ADVERTISE_PAUSE_ASYM) {
888 if (remote_adv & ADVERTISE_PAUSE_CAP) {
889 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
891 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
892 bp->flow_ctrl = FLOW_CTRL_RX;
896 if (remote_adv & ADVERTISE_PAUSE_CAP) {
897 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
901 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
902 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
903 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
905 bp->flow_ctrl = FLOW_CTRL_TX;
911 bnx2_5709s_linkup(struct bnx2 *bp)
917 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
918 bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
919 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
921 if ((bp->autoneg & AUTONEG_SPEED) == 0) {
922 bp->line_speed = bp->req_line_speed;
923 bp->duplex = bp->req_duplex;
926 speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
928 case MII_BNX2_GP_TOP_AN_SPEED_10:
929 bp->line_speed = SPEED_10;
931 case MII_BNX2_GP_TOP_AN_SPEED_100:
932 bp->line_speed = SPEED_100;
934 case MII_BNX2_GP_TOP_AN_SPEED_1G:
935 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
936 bp->line_speed = SPEED_1000;
938 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
939 bp->line_speed = SPEED_2500;
942 if (val & MII_BNX2_GP_TOP_AN_FD)
943 bp->duplex = DUPLEX_FULL;
945 bp->duplex = DUPLEX_HALF;
950 bnx2_5708s_linkup(struct bnx2 *bp)
955 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
956 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
957 case BCM5708S_1000X_STAT1_SPEED_10:
958 bp->line_speed = SPEED_10;
960 case BCM5708S_1000X_STAT1_SPEED_100:
961 bp->line_speed = SPEED_100;
963 case BCM5708S_1000X_STAT1_SPEED_1G:
964 bp->line_speed = SPEED_1000;
966 case BCM5708S_1000X_STAT1_SPEED_2G5:
967 bp->line_speed = SPEED_2500;
970 if (val & BCM5708S_1000X_STAT1_FD)
971 bp->duplex = DUPLEX_FULL;
973 bp->duplex = DUPLEX_HALF;
979 bnx2_5706s_linkup(struct bnx2 *bp)
981 u32 bmcr, local_adv, remote_adv, common;
984 bp->line_speed = SPEED_1000;
986 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
987 if (bmcr & BMCR_FULLDPLX) {
988 bp->duplex = DUPLEX_FULL;
991 bp->duplex = DUPLEX_HALF;
994 if (!(bmcr & BMCR_ANENABLE)) {
998 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
999 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1001 common = local_adv & remote_adv;
1002 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1004 if (common & ADVERTISE_1000XFULL) {
1005 bp->duplex = DUPLEX_FULL;
1008 bp->duplex = DUPLEX_HALF;
1016 bnx2_copper_linkup(struct bnx2 *bp)
1020 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1021 if (bmcr & BMCR_ANENABLE) {
1022 u32 local_adv, remote_adv, common;
1024 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
1025 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
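/* MII_STAT1000 reports the link partner's 1000BASE-T abilities two bit
 * positions above where MII_CTRL1000 advertises them, so shifting
 * remote_adv right by 2 lines the fields up for the AND below.
 */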
1027 common = local_adv & (remote_adv >> 2);
1028 if (common & ADVERTISE_1000FULL) {
1029 bp->line_speed = SPEED_1000;
1030 bp->duplex = DUPLEX_FULL;
1032 else if (common & ADVERTISE_1000HALF) {
1033 bp->line_speed = SPEED_1000;
1034 bp->duplex = DUPLEX_HALF;
1037 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1038 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1040 common = local_adv & remote_adv;
1041 if (common & ADVERTISE_100FULL) {
1042 bp->line_speed = SPEED_100;
1043 bp->duplex = DUPLEX_FULL;
1045 else if (common & ADVERTISE_100HALF) {
1046 bp->line_speed = SPEED_100;
1047 bp->duplex = DUPLEX_HALF;
1049 else if (common & ADVERTISE_10FULL) {
1050 bp->line_speed = SPEED_10;
1051 bp->duplex = DUPLEX_FULL;
1053 else if (common & ADVERTISE_10HALF) {
1054 bp->line_speed = SPEED_10;
1055 bp->duplex = DUPLEX_HALF;
1064 if (bmcr & BMCR_SPEED100) {
1065 bp->line_speed = SPEED_100;
1068 bp->line_speed = SPEED_10;
1070 if (bmcr & BMCR_FULLDPLX) {
1071 bp->duplex = DUPLEX_FULL;
1074 bp->duplex = DUPLEX_HALF;
1082 bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
1084 u32 val, rx_cid_addr = GET_CID_ADDR(cid);
1086 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
1087 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
1090 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1091 u32 lo_water, hi_water;
1093 if (bp->flow_ctrl & FLOW_CTRL_TX)
1094 lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
1096 lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
1097 if (lo_water >= bp->rx_ring_size)
1100 hi_water = bp->rx_ring_size / 4;
1102 if (hi_water <= lo_water)
1105 hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
1106 lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;
1110 else if (hi_water == 0)
1112 val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
1114 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
1118 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1123 for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1126 bnx2_init_rx_context(bp, cid);
1131 bnx2_set_mac_link(struct bnx2 *bp)
1135 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
1136 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
1137 (bp->duplex == DUPLEX_HALF)) {
1138 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
1141 /* Configure the EMAC mode register. */
1142 val = REG_RD(bp, BNX2_EMAC_MODE);
1144 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1145 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1146 BNX2_EMAC_MODE_25G_MODE);
1149 switch (bp->line_speed) {
1151 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
1152 val |= BNX2_EMAC_MODE_PORT_MII_10M;
1157 val |= BNX2_EMAC_MODE_PORT_MII;
1160 val |= BNX2_EMAC_MODE_25G_MODE;
1163 val |= BNX2_EMAC_MODE_PORT_GMII;
1168 val |= BNX2_EMAC_MODE_PORT_GMII;
1171 /* Set the MAC to operate in the appropriate duplex mode. */
1172 if (bp->duplex == DUPLEX_HALF)
1173 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
1174 REG_WR(bp, BNX2_EMAC_MODE, val);
1176 /* Enable/disable rx PAUSE. */
1177 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
1179 if (bp->flow_ctrl & FLOW_CTRL_RX)
1180 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
1181 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
1183 /* Enable/disable tx PAUSE. */
1184 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
1185 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
1187 if (bp->flow_ctrl & FLOW_CTRL_TX)
1188 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
1189 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
1191 /* Acknowledge the interrupt. */
1192 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
1194 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1195 bnx2_init_all_rx_contexts(bp);
1201 bnx2_enable_bmsr1(struct bnx2 *bp)
1203 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1204 (CHIP_NUM(bp) == CHIP_NUM_5709))
1205 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1206 MII_BNX2_BLK_ADDR_GP_STATUS);
1210 bnx2_disable_bmsr1(struct bnx2 *bp)
1212 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1213 (CHIP_NUM(bp) == CHIP_NUM_5709))
1214 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1215 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1219 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1224 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1227 if (bp->autoneg & AUTONEG_SPEED)
1228 bp->advertising |= ADVERTISED_2500baseX_Full;
1230 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1231 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1233 bnx2_read_phy(bp, bp->mii_up1, &up1);
1234 if (!(up1 & BCM5708S_UP1_2G5)) {
1235 up1 |= BCM5708S_UP1_2G5;
1236 bnx2_write_phy(bp, bp->mii_up1, up1);
1240 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1241 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1242 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1248 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1253 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1256 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1257 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1259 bnx2_read_phy(bp, bp->mii_up1, &up1);
1260 if (up1 & BCM5708S_UP1_2G5) {
1261 up1 &= ~BCM5708S_UP1_2G5;
1262 bnx2_write_phy(bp, bp->mii_up1, up1);
1266 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1267 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1268 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1274 bnx2_enable_forced_2g5(struct bnx2 *bp)
1278 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1281 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1284 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1285 MII_BNX2_BLK_ADDR_SERDES_DIG);
1286 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1287 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1288 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1289 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1291 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1292 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1293 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1295 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1296 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1297 bmcr |= BCM5708S_BMCR_FORCE_2500;
1300 if (bp->autoneg & AUTONEG_SPEED) {
1301 bmcr &= ~BMCR_ANENABLE;
1302 if (bp->req_duplex == DUPLEX_FULL)
1303 bmcr |= BMCR_FULLDPLX;
1305 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1309 bnx2_disable_forced_2g5(struct bnx2 *bp)
1313 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1316 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1319 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1320 MII_BNX2_BLK_ADDR_SERDES_DIG);
1321 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1322 val &= ~MII_BNX2_SD_MISC1_FORCE;
1323 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1325 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1326 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1327 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1329 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1330 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1331 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1334 if (bp->autoneg & AUTONEG_SPEED)
1335 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1336 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1340 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1344 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1345 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1347 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1349 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1353 bnx2_set_link(struct bnx2 *bp)
1358 if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
1363 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1366 link_up = bp->link_up;
1368 bnx2_enable_bmsr1(bp);
1369 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1370 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1371 bnx2_disable_bmsr1(bp);
1373 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1374 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
1377 if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
1378 bnx2_5706s_force_link_dn(bp, 0);
1379 bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
1381 val = REG_RD(bp, BNX2_EMAC_STATUS);
1383 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
1384 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1385 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1387 if ((val & BNX2_EMAC_STATUS_LINK) &&
1388 !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
1389 bmsr |= BMSR_LSTATUS;
1391 bmsr &= ~BMSR_LSTATUS;
1394 if (bmsr & BMSR_LSTATUS) {
1397 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1398 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1399 bnx2_5706s_linkup(bp);
1400 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1401 bnx2_5708s_linkup(bp);
1402 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1403 bnx2_5709s_linkup(bp);
1406 bnx2_copper_linkup(bp);
1408 bnx2_resolve_flow_ctrl(bp);
1411 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1412 (bp->autoneg & AUTONEG_SPEED))
1413 bnx2_disable_forced_2g5(bp);
1415 if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
1418 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1419 bmcr |= BMCR_ANENABLE;
1420 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1422 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
1427 if (bp->link_up != link_up) {
1428 bnx2_report_link(bp);
1431 bnx2_set_mac_link(bp);
1437 bnx2_reset_phy(struct bnx2 *bp)
1442 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1444 #define PHY_RESET_MAX_WAIT 100
1445 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1448 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1449 if (!(reg & BMCR_RESET)) {
1454 if (i == PHY_RESET_MAX_WAIT) {
1461 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1465 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1466 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1468 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1469 adv = ADVERTISE_1000XPAUSE;
1472 adv = ADVERTISE_PAUSE_CAP;
1475 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1476 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1477 adv = ADVERTISE_1000XPSE_ASYM;
1480 adv = ADVERTISE_PAUSE_ASYM;
1483 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1484 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1485 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1488 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1494 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1497 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1499 u32 speed_arg = 0, pause_adv;
1501 pause_adv = bnx2_phy_get_pause_adv(bp);
1503 if (bp->autoneg & AUTONEG_SPEED) {
1504 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1505 if (bp->advertising & ADVERTISED_10baseT_Half)
1506 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1507 if (bp->advertising & ADVERTISED_10baseT_Full)
1508 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1509 if (bp->advertising & ADVERTISED_100baseT_Half)
1510 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1511 if (bp->advertising & ADVERTISED_100baseT_Full)
1512 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1513 if (bp->advertising & ADVERTISED_1000baseT_Full)
1514 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1515 if (bp->advertising & ADVERTISED_2500baseX_Full)
1516 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1518 if (bp->req_line_speed == SPEED_2500)
1519 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1520 else if (bp->req_line_speed == SPEED_1000)
1521 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1522 else if (bp->req_line_speed == SPEED_100) {
1523 if (bp->req_duplex == DUPLEX_FULL)
1524 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1526 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1527 } else if (bp->req_line_speed == SPEED_10) {
1528 if (bp->req_duplex == DUPLEX_FULL)
1529 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1531 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1535 if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1536 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1537 if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
1538 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1540 if (port == PORT_TP)
1541 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1542 BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1544 bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
1546 spin_unlock_bh(&bp->phy_lock);
1547 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
1548 spin_lock_bh(&bp->phy_lock);
1554 bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
1559 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1560 return (bnx2_setup_remote_phy(bp, port));
1562 if (!(bp->autoneg & AUTONEG_SPEED)) {
1564 int force_link_down = 0;
1566 if (bp->req_line_speed == SPEED_2500) {
1567 if (!bnx2_test_and_enable_2g5(bp))
1568 force_link_down = 1;
1569 } else if (bp->req_line_speed == SPEED_1000) {
1570 if (bnx2_test_and_disable_2g5(bp))
1571 force_link_down = 1;
1573 bnx2_read_phy(bp, bp->mii_adv, &adv);
1574 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1576 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1577 new_bmcr = bmcr & ~BMCR_ANENABLE;
1578 new_bmcr |= BMCR_SPEED1000;
1580 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1581 if (bp->req_line_speed == SPEED_2500)
1582 bnx2_enable_forced_2g5(bp);
1583 else if (bp->req_line_speed == SPEED_1000) {
1584 bnx2_disable_forced_2g5(bp);
1585 new_bmcr &= ~0x2000;
1588 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1589 if (bp->req_line_speed == SPEED_2500)
1590 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1592 new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
1595 if (bp->req_duplex == DUPLEX_FULL) {
1596 adv |= ADVERTISE_1000XFULL;
1597 new_bmcr |= BMCR_FULLDPLX;
1600 adv |= ADVERTISE_1000XHALF;
1601 new_bmcr &= ~BMCR_FULLDPLX;
1603 if ((new_bmcr != bmcr) || (force_link_down)) {
1604 /* Force a link down visible on the other side */
1606 bnx2_write_phy(bp, bp->mii_adv, adv &
1607 ~(ADVERTISE_1000XFULL |
1608 ADVERTISE_1000XHALF));
1609 bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
1610 BMCR_ANRESTART | BMCR_ANENABLE);
1613 netif_carrier_off(bp->dev);
1614 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1615 bnx2_report_link(bp);
1617 bnx2_write_phy(bp, bp->mii_adv, adv);
1618 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1620 bnx2_resolve_flow_ctrl(bp);
1621 bnx2_set_mac_link(bp);
1626 bnx2_test_and_enable_2g5(bp);
1628 if (bp->advertising & ADVERTISED_1000baseT_Full)
1629 new_adv |= ADVERTISE_1000XFULL;
1631 new_adv |= bnx2_phy_get_pause_adv(bp);
1633 bnx2_read_phy(bp, bp->mii_adv, &adv);
1634 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1636 bp->serdes_an_pending = 0;
1637 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1638 /* Force a link down visible on the other side */
1640 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1641 spin_unlock_bh(&bp->phy_lock);
1643 spin_lock_bh(&bp->phy_lock);
1646 bnx2_write_phy(bp, bp->mii_adv, new_adv);
1647 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
1649 /* Speed up link-up time when the link partner
1650 * does not autonegotiate, which is very common
1651 * in blade servers. Some blade servers use
1652 * IPMI for keyboard input, and it's important
1653 * to minimize link disruptions. Autonegotiation
1654 * involves exchanging base pages plus 3 next pages
1655 * and normally completes in about 120 msec.
1657 bp->current_interval = SERDES_AN_TIMEOUT;
1658 bp->serdes_an_pending = 1;
1659 mod_timer(&bp->timer, jiffies + bp->current_interval);
1661 bnx2_resolve_flow_ctrl(bp);
1662 bnx2_set_mac_link(bp);
1668 #define ETHTOOL_ALL_FIBRE_SPEED \
1669 (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ? \
1670 (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1671 (ADVERTISED_1000baseT_Full)
1673 #define ETHTOOL_ALL_COPPER_SPEED \
1674 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1675 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1676 ADVERTISED_1000baseT_Full)
1678 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1679 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1681 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1684 bnx2_set_default_remote_link(struct bnx2 *bp)
1688 if (bp->phy_port == PORT_TP)
1689 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1691 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1693 if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1694 bp->req_line_speed = 0;
1695 bp->autoneg |= AUTONEG_SPEED;
1696 bp->advertising = ADVERTISED_Autoneg;
1697 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1698 bp->advertising |= ADVERTISED_10baseT_Half;
1699 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1700 bp->advertising |= ADVERTISED_10baseT_Full;
1701 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1702 bp->advertising |= ADVERTISED_100baseT_Half;
1703 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1704 bp->advertising |= ADVERTISED_100baseT_Full;
1705 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1706 bp->advertising |= ADVERTISED_1000baseT_Full;
1707 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1708 bp->advertising |= ADVERTISED_2500baseX_Full;
1711 bp->advertising = 0;
1712 bp->req_duplex = DUPLEX_FULL;
1713 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1714 bp->req_line_speed = SPEED_10;
1715 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1716 bp->req_duplex = DUPLEX_HALF;
1718 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1719 bp->req_line_speed = SPEED_100;
1720 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1721 bp->req_duplex = DUPLEX_HALF;
1723 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1724 bp->req_line_speed = SPEED_1000;
1725 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1726 bp->req_line_speed = SPEED_2500;
1731 bnx2_set_default_link(struct bnx2 *bp)
1733 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1734 bnx2_set_default_remote_link(bp);
1738 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1739 bp->req_line_speed = 0;
1740 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1743 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1745 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1746 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1747 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1749 bp->req_line_speed = bp->line_speed = SPEED_1000;
1750 bp->req_duplex = DUPLEX_FULL;
1753 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1757 bnx2_send_heart_beat(struct bnx2 *bp)
1762 spin_lock(&bp->indirect_lock);
1763 msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1764 addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1765 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1766 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1767 spin_unlock(&bp->indirect_lock);
1771 bnx2_remote_phy_event(struct bnx2 *bp)
1774 u8 link_up = bp->link_up;
1777 msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
1779 if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1780 bnx2_send_heart_beat(bp);
1782 msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1784 if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1790 speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1791 bp->duplex = DUPLEX_FULL;
1793 case BNX2_LINK_STATUS_10HALF:
1794 bp->duplex = DUPLEX_HALF;
1795 case BNX2_LINK_STATUS_10FULL:
1796 bp->line_speed = SPEED_10;
1798 case BNX2_LINK_STATUS_100HALF:
1799 bp->duplex = DUPLEX_HALF;
1800 case BNX2_LINK_STATUS_100BASE_T4:
1801 case BNX2_LINK_STATUS_100FULL:
1802 bp->line_speed = SPEED_100;
1804 case BNX2_LINK_STATUS_1000HALF:
1805 bp->duplex = DUPLEX_HALF;
1806 case BNX2_LINK_STATUS_1000FULL:
1807 bp->line_speed = SPEED_1000;
1809 case BNX2_LINK_STATUS_2500HALF:
1810 bp->duplex = DUPLEX_HALF;
1811 case BNX2_LINK_STATUS_2500FULL:
1812 bp->line_speed = SPEED_2500;
1820 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1821 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1822 if (bp->duplex == DUPLEX_FULL)
1823 bp->flow_ctrl = bp->req_flow_ctrl;
1825 if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
1826 bp->flow_ctrl |= FLOW_CTRL_TX;
1827 if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
1828 bp->flow_ctrl |= FLOW_CTRL_RX;
1831 old_port = bp->phy_port;
1832 if (msg & BNX2_LINK_STATUS_SERDES_LINK)
1833 bp->phy_port = PORT_FIBRE;
1835 bp->phy_port = PORT_TP;
1837 if (old_port != bp->phy_port)
1838 bnx2_set_default_link(bp);
1841 if (bp->link_up != link_up)
1842 bnx2_report_link(bp);
1844 bnx2_set_mac_link(bp);
1848 bnx2_set_remote_link(struct bnx2 *bp)
1852 evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
1854 case BNX2_FW_EVT_CODE_LINK_EVENT:
1855 bnx2_remote_phy_event(bp);
1857 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1859 bnx2_send_heart_beat(bp);
1866 bnx2_setup_copper_phy(struct bnx2 *bp)
1871 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1873 if (bp->autoneg & AUTONEG_SPEED) {
1874 u32 adv_reg, adv1000_reg;
1875 u32 new_adv_reg = 0;
1876 u32 new_adv1000_reg = 0;
1878 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
1879 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1880 ADVERTISE_PAUSE_ASYM);
1882 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1883 adv1000_reg &= PHY_ALL_1000_SPEED;
1885 if (bp->advertising & ADVERTISED_10baseT_Half)
1886 new_adv_reg |= ADVERTISE_10HALF;
1887 if (bp->advertising & ADVERTISED_10baseT_Full)
1888 new_adv_reg |= ADVERTISE_10FULL;
1889 if (bp->advertising & ADVERTISED_100baseT_Half)
1890 new_adv_reg |= ADVERTISE_100HALF;
1891 if (bp->advertising & ADVERTISED_100baseT_Full)
1892 new_adv_reg |= ADVERTISE_100FULL;
1893 if (bp->advertising & ADVERTISED_1000baseT_Full)
1894 new_adv1000_reg |= ADVERTISE_1000FULL;
1896 new_adv_reg |= ADVERTISE_CSMA;
1898 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1900 if ((adv1000_reg != new_adv1000_reg) ||
1901 (adv_reg != new_adv_reg) ||
1902 ((bmcr & BMCR_ANENABLE) == 0)) {
1904 bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
1905 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
1906 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
1909 else if (bp->link_up) {
1910 /* Flow ctrl may have changed from auto to forced */
1911 /* or vice-versa. */
1913 bnx2_resolve_flow_ctrl(bp);
1914 bnx2_set_mac_link(bp);
1920 if (bp->req_line_speed == SPEED_100) {
1921 new_bmcr |= BMCR_SPEED100;
1923 if (bp->req_duplex == DUPLEX_FULL) {
1924 new_bmcr |= BMCR_FULLDPLX;
1926 if (new_bmcr != bmcr) {
1929 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1930 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1932 if (bmsr & BMSR_LSTATUS) {
1933 /* Force link down */
1934 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1935 spin_unlock_bh(&bp->phy_lock);
1937 spin_lock_bh(&bp->phy_lock);
1939 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1940 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1943 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1945 /* Normally, the new speed is set up after the link has
1946 * gone down and come back up. In some cases the link will
1947 * not go down, so we need to set up the new speed here.
1949 if (bmsr & BMSR_LSTATUS) {
1950 bp->line_speed = bp->req_line_speed;
1951 bp->duplex = bp->req_duplex;
1952 bnx2_resolve_flow_ctrl(bp);
1953 bnx2_set_mac_link(bp);
1956 bnx2_resolve_flow_ctrl(bp);
1957 bnx2_set_mac_link(bp);
1963 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1965 if (bp->loopback == MAC_LOOPBACK)
1968 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1969 return (bnx2_setup_serdes_phy(bp, port));
1972 return (bnx2_setup_copper_phy(bp));
1977 bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
1981 bp->mii_bmcr = MII_BMCR + 0x10;
1982 bp->mii_bmsr = MII_BMSR + 0x10;
1983 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
1984 bp->mii_adv = MII_ADVERTISE + 0x10;
1985 bp->mii_lpa = MII_LPA + 0x10;
1986 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
1988 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
1989 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
1991 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1995 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
1997 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
1998 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
1999 val |= MII_BNX2_SD_1000XCTL1_FIBER;
2000 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
2002 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
2003 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
2004 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
2005 val |= BCM5708S_UP1_2G5;
2007 val &= ~BCM5708S_UP1_2G5;
2008 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
2010 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
2011 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
2012 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
2013 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
2015 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
2017 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
2018 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
2019 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
2021 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2027 bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
2034 bp->mii_up1 = BCM5708S_UP1;
2036 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
2037 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
2038 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2040 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
2041 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
2042 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
2044 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
2045 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
2046 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
2048 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
2049 bnx2_read_phy(bp, BCM5708S_UP1, &val);
2050 val |= BCM5708S_UP1_2G5;
2051 bnx2_write_phy(bp, BCM5708S_UP1, val);
2054 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
2055 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
2056 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
2057 /* increase tx signal amplitude */
2058 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2059 BCM5708S_BLK_ADDR_TX_MISC);
2060 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
2061 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
2062 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
2063 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2066 val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
2067 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
2072 is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
2073 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
2074 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2075 BCM5708S_BLK_ADDR_TX_MISC);
2076 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
2077 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2078 BCM5708S_BLK_ADDR_DIG);
2085 bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
2090 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
2092 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2093 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
2095 if (bp->dev->mtu > 1500) {
2098 /* Set extended packet length bit */
2099 bnx2_write_phy(bp, 0x18, 0x7);
2100 bnx2_read_phy(bp, 0x18, &val);
2101 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
2103 bnx2_write_phy(bp, 0x1c, 0x6c00);
2104 bnx2_read_phy(bp, 0x1c, &val);
2105 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
2110 bnx2_write_phy(bp, 0x18, 0x7);
2111 bnx2_read_phy(bp, 0x18, &val);
2112 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2114 bnx2_write_phy(bp, 0x1c, 0x6c00);
2115 bnx2_read_phy(bp, 0x1c, &val);
2116 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
2123 bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
2130 if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
2131 bnx2_write_phy(bp, 0x18, 0x0c00);
2132 bnx2_write_phy(bp, 0x17, 0x000a);
2133 bnx2_write_phy(bp, 0x15, 0x310b);
2134 bnx2_write_phy(bp, 0x17, 0x201f);
2135 bnx2_write_phy(bp, 0x15, 0x9506);
2136 bnx2_write_phy(bp, 0x17, 0x401f);
2137 bnx2_write_phy(bp, 0x15, 0x14e2);
2138 bnx2_write_phy(bp, 0x18, 0x0400);
2141 if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
2142 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
2143 MII_BNX2_DSP_EXPAND_REG | 0x8);
2144 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
2146 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
2149 if (bp->dev->mtu > 1500) {
2150 /* Set extended packet length bit */
2151 bnx2_write_phy(bp, 0x18, 0x7);
2152 bnx2_read_phy(bp, 0x18, &val);
2153 bnx2_write_phy(bp, 0x18, val | 0x4000);
2155 bnx2_read_phy(bp, 0x10, &val);
2156 bnx2_write_phy(bp, 0x10, val | 0x1);
2159 bnx2_write_phy(bp, 0x18, 0x7);
2160 bnx2_read_phy(bp, 0x18, &val);
2161 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2163 bnx2_read_phy(bp, 0x10, &val);
2164 bnx2_write_phy(bp, 0x10, val & ~0x1);
2167 /* ethernet@wirespeed */
2168 bnx2_write_phy(bp, 0x18, 0x7007);
2169 bnx2_read_phy(bp, 0x18, &val);
2170 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
2176 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2181 bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2182 bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2184 bp->mii_bmcr = MII_BMCR;
2185 bp->mii_bmsr = MII_BMSR;
2186 bp->mii_bmsr1 = MII_BMSR;
2187 bp->mii_adv = MII_ADVERTISE;
2188 bp->mii_lpa = MII_LPA;
2190 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2192 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2195 bnx2_read_phy(bp, MII_PHYSID1, &val);
2196 bp->phy_id = val << 16;
2197 bnx2_read_phy(bp, MII_PHYSID2, &val);
2198 bp->phy_id |= val & 0xffff;
2200 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2201 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2202 rc = bnx2_init_5706s_phy(bp, reset_phy);
2203 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2204 rc = bnx2_init_5708s_phy(bp, reset_phy);
2205 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2206 rc = bnx2_init_5709s_phy(bp, reset_phy);
2209 rc = bnx2_init_copper_phy(bp, reset_phy);
2214 rc = bnx2_setup_phy(bp, bp->phy_port);
2220 bnx2_set_mac_loopback(struct bnx2 *bp)
2224 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2225 mac_mode &= ~BNX2_EMAC_MODE_PORT;
2226 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2227 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2232 static int bnx2_test_link(struct bnx2 *);
2235 bnx2_set_phy_loopback(struct bnx2 *bp)
2240 spin_lock_bh(&bp->phy_lock);
2241 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2243 spin_unlock_bh(&bp->phy_lock);
2247 for (i = 0; i < 10; i++) {
2248 if (bnx2_test_link(bp) == 0)
2253 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2254 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2255 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2256 BNX2_EMAC_MODE_25G_MODE);
2258 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2259 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2265 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
2271 msg_data |= bp->fw_wr_seq;
2273 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
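/* Mailbox handshake sketch (mirrors the code below): the sequenced
 * command is written to BNX2_DRV_MB, then BNX2_FW_MB is polled until
 * the firmware echoes the sequence number; if that never happens the
 * driver reports BNX2_DRV_MSG_CODE_FW_TIMEOUT back to the firmware.
 */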
2278 /* wait for an acknowledgement. */
2279 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
2282 val = bnx2_shmem_rd(bp, BNX2_FW_MB);
2284 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2287 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2290 /* If we timed out, inform the firmware that this is the case. */
2291 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2293 printk(KERN_ERR PFX "fw sync timeout, reset code = "
2296 msg_data &= ~BNX2_DRV_MSG_CODE;
2297 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2299 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2304 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2311 bnx2_init_5709_context(struct bnx2 *bp)
2316 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2317 val |= (BCM_PAGE_BITS - 8) << 16;
2318 REG_WR(bp, BNX2_CTX_COMMAND, val);
2319 for (i = 0; i < 10; i++) {
2320 val = REG_RD(bp, BNX2_CTX_COMMAND);
2321 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2325 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2328 for (i = 0; i < bp->ctx_pages; i++) {
2332 memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
2336 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2337 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2338 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2339 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2340 (u64) bp->ctx_blk_mapping[i] >> 32);
2341 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2342 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2343 for (j = 0; j < 10; j++) {
2345 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2346 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2350 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2359 bnx2_init_context(struct bnx2 *bp)
2365 u32 vcid_addr, pcid_addr, offset;
2370 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2373 vcid_addr = GET_PCID_ADDR(vcid);
2375 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2380 pcid_addr = GET_PCID_ADDR(new_vcid);
2383 vcid_addr = GET_CID_ADDR(vcid);
2384 pcid_addr = vcid_addr;
2387 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2388 vcid_addr += (i << PHY_CTX_SHIFT);
2389 pcid_addr += (i << PHY_CTX_SHIFT);
2391 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2392 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2394 /* Zero out the context. */
2395 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2396 bnx2_ctx_wr(bp, vcid_addr, offset, 0);
2402 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2408 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2409 if (good_mbuf == NULL) {
2410 printk(KERN_ERR PFX "Failed to allocate memory in "
2411 "bnx2_alloc_bad_rbuf\n");
2415 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2416 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2420 /* Allocate a bunch of mbufs and save the good ones in an array. */
2421 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2422 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2423 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2424 BNX2_RBUF_COMMAND_ALLOC_REQ);
2426 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2428 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2430 /* The addresses with Bit 9 set are bad memory blocks. */
2431 if (!(val & (1 << 9))) {
2432 good_mbuf[good_mbuf_cnt] = (u16) val;
2436 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2439 /* Free the good ones back to the mbuf pool, thus discarding
2440 * all the bad ones. */
2441 while (good_mbuf_cnt) {
2444 val = good_mbuf[good_mbuf_cnt];
2445 val = (val << 9) | val | 1;
2447 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2454 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2458 val = (mac_addr[0] << 8) | mac_addr[1];
2460 REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2462 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2463 (mac_addr[4] << 8) | mac_addr[5];
2465 REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2469 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2472 struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2473 struct rx_bd *rxbd =
2474 &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2475 struct page *page = alloc_page(GFP_ATOMIC);
2479 mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2480 PCI_DMA_FROMDEVICE);
2482 pci_unmap_addr_set(rx_pg, mapping, mapping);
2483 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2484 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2489 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2491 struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2492 struct page *page = rx_pg->page;
2497 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2498 PCI_DMA_FROMDEVICE);
2505 bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2507 struct sk_buff *skb;
2508 struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2510 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2511 unsigned long align;
2513 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
2518 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2519 skb_reserve(skb, BNX2_RX_ALIGN - align);
2521 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2522 PCI_DMA_FROMDEVICE);
2525 pci_unmap_addr_set(rx_buf, mapping, mapping);
2527 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2528 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2530 rxr->rx_prod_bseq += bp->rx_buf_use_size;
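/* A PHY event is pending when an attention bit in the status block differs
 * from its acknowledgement copy.  The code below acknowledges the event by
 * toggling the bit through the STATUS_BIT_SET/CLEAR command registers so the
 * two copies match again.
 */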
2536 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2538 struct status_block *sblk = bnapi->status_blk.msi;
2539 u32 new_link_state, old_link_state;
2542 new_link_state = sblk->status_attn_bits & event;
2543 old_link_state = sblk->status_attn_bits_ack & event;
2544 if (new_link_state != old_link_state) {
2546 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2548 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2556 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2558 spin_lock(&bp->phy_lock);
2560 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2562 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2563 bnx2_set_remote_link(bp);
2565 spin_unlock(&bp->phy_lock);
2570 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2574 /* Tell compiler that status block fields can change. */
2576 cons = *bnapi->hw_tx_cons_ptr;
2577 if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
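/* TX completion: walk the software consumer index up to the hardware
 * consumer index taken from the status block, unmapping the head and the
 * page fragments of each completed skb before freeing it.  TSO packets may
 * complete in partial BD chunks, so the loop re-checks against the hardware
 * index before reclaiming them.
 */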
2583 bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2585 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
2586 u16 hw_cons, sw_cons, sw_ring_cons;
2589 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2590 sw_cons = txr->tx_cons;
2592 while (sw_cons != hw_cons) {
2593 struct sw_bd *tx_buf;
2594 struct sk_buff *skb;
2597 sw_ring_cons = TX_RING_IDX(sw_cons);
2599 tx_buf = &txr->tx_buf_ring[sw_ring_cons];
2602 /* partial BD completions possible with TSO packets */
2603 if (skb_is_gso(skb)) {
2604 u16 last_idx, last_ring_idx;
2606 last_idx = sw_cons +
2607 skb_shinfo(skb)->nr_frags + 1;
2608 last_ring_idx = sw_ring_cons +
2609 skb_shinfo(skb)->nr_frags + 1;
2610 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2613 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2618 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
2619 skb_headlen(skb), PCI_DMA_TODEVICE);
2622 last = skb_shinfo(skb)->nr_frags;
2624 for (i = 0; i < last; i++) {
2625 sw_cons = NEXT_TX_BD(sw_cons);
2627 pci_unmap_page(bp->pdev,
2629 &txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
2631 skb_shinfo(skb)->frags[i].size,
2635 sw_cons = NEXT_TX_BD(sw_cons);
2639 if (tx_pkt == budget)
2642 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2645 txr->hw_tx_cons = hw_cons;
2646 txr->tx_cons = sw_cons;
2647 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2648 * before checking for netif_queue_stopped(). Without the
2649 * memory barrier, there is a small possibility that bnx2_start_xmit()
2650 * will miss it and cause the queue to be stopped forever.
2654 if (unlikely(netif_queue_stopped(bp->dev)) &&
2655 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
2656 netif_tx_lock(bp->dev);
2657 if ((netif_queue_stopped(bp->dev)) &&
2658 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
2659 netif_wake_queue(bp->dev);
2660 netif_tx_unlock(bp->dev);
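/* When a replacement buffer cannot be allocated (or a packet is dropped),
 * the existing pages/skbs are recycled: the consumer-side ring entry is
 * copied back to the producer side, including its DMA mapping and buffer
 * descriptor address, so the hardware can reuse the same buffer.
 */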
2666 bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2667 struct sk_buff *skb, int count)
2669 struct sw_pg *cons_rx_pg, *prod_rx_pg;
2670 struct rx_bd *cons_bd, *prod_bd;
2673 u16 hw_prod = rxr->rx_pg_prod, prod;
2674 u16 cons = rxr->rx_pg_cons;
2676 for (i = 0; i < count; i++) {
2677 prod = RX_PG_RING_IDX(hw_prod);
2679 prod_rx_pg = &rxr->rx_pg_ring[prod];
2680 cons_rx_pg = &rxr->rx_pg_ring[cons];
2681 cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2682 prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2684 if (i == 0 && skb) {
2686 struct skb_shared_info *shinfo;
2688 shinfo = skb_shinfo(skb);
2690 page = shinfo->frags[shinfo->nr_frags].page;
2691 shinfo->frags[shinfo->nr_frags].page = NULL;
2692 mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2693 PCI_DMA_FROMDEVICE);
2694 cons_rx_pg->page = page;
2695 pci_unmap_addr_set(cons_rx_pg, mapping, mapping);
2699 prod_rx_pg->page = cons_rx_pg->page;
2700 cons_rx_pg->page = NULL;
2701 pci_unmap_addr_set(prod_rx_pg, mapping,
2702 pci_unmap_addr(cons_rx_pg, mapping));
2704 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2705 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2708 cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
2709 hw_prod = NEXT_RX_BD(hw_prod);
2711 rxr->rx_pg_prod = hw_prod;
2712 rxr->rx_pg_cons = cons;
2716 bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2717 struct sk_buff *skb, u16 cons, u16 prod)
2719 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2720 struct rx_bd *cons_bd, *prod_bd;
2722 cons_rx_buf = &rxr->rx_buf_ring[cons];
2723 prod_rx_buf = &rxr->rx_buf_ring[prod];
2725 pci_dma_sync_single_for_device(bp->pdev,
2726 pci_unmap_addr(cons_rx_buf, mapping),
2727 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2729 rxr->rx_prod_bseq += bp->rx_buf_use_size;
2731 prod_rx_buf->skb = skb;
2736 pci_unmap_addr_set(prod_rx_buf, mapping,
2737 pci_unmap_addr(cons_rx_buf, mapping));
2739 cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2740 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2741 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2742 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
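/* Assemble a received packet.  For normal frames the skb data buffer holds
 * the whole packet; for split/jumbo frames only hdr_len bytes live in the
 * skb and the rest is attached as page fragments pulled from the page ring
 * (the +4 matches the 4 CRC bytes included in the hardware length).
 */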
2746 bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
2747 unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
2751 u16 prod = ring_idx & 0xffff;
2753 err = bnx2_alloc_rx_skb(bp, rxr, prod);
2754 if (unlikely(err)) {
2755 bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
2757 unsigned int raw_len = len + 4;
2758 int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
2760 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
2765 skb_reserve(skb, BNX2_RX_OFFSET);
2766 pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
2767 PCI_DMA_FROMDEVICE);
2773 unsigned int i, frag_len, frag_size, pages;
2774 struct sw_pg *rx_pg;
2775 u16 pg_cons = rxr->rx_pg_cons;
2776 u16 pg_prod = rxr->rx_pg_prod;
2778 frag_size = len + 4 - hdr_len;
2779 pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
2780 skb_put(skb, hdr_len);
2782 for (i = 0; i < pages; i++) {
2783 frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
2784 if (unlikely(frag_len <= 4)) {
2785 unsigned int tail = 4 - frag_len;
2787 rxr->rx_pg_cons = pg_cons;
2788 rxr->rx_pg_prod = pg_prod;
2789 bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
2796 &skb_shinfo(skb)->frags[i - 1];
2798 skb->data_len -= tail;
2799 skb->truesize -= tail;
2803 rx_pg = &rxr->rx_pg_ring[pg_cons];
2805 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping),
2806 PAGE_SIZE, PCI_DMA_FROMDEVICE);
2811 skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
2814 err = bnx2_alloc_rx_page(bp, rxr,
2815 RX_PG_RING_IDX(pg_prod));
2816 if (unlikely(err)) {
2817 rxr->rx_pg_cons = pg_cons;
2818 rxr->rx_pg_prod = pg_prod;
2819 bnx2_reuse_rx_skb_pages(bp, rxr, skb,
2824 frag_size -= frag_len;
2825 skb->data_len += frag_len;
2826 skb->truesize += frag_len;
2827 skb->len += frag_len;
2829 pg_prod = NEXT_RX_BD(pg_prod);
2830 pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
2832 rxr->rx_pg_prod = pg_prod;
2833 rxr->rx_pg_cons = pg_cons;
2839 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
2843 /* Tell compiler that status block fields can change. */
2845 cons = *bnapi->hw_rx_cons_ptr;
2846 if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
2852 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2854 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
2855 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2856 struct l2_fhdr *rx_hdr;
2857 int rx_pkt = 0, pg_ring_used = 0;
2859 hw_cons = bnx2_get_hw_rx_cons(bnapi);
2860 sw_cons = rxr->rx_cons;
2861 sw_prod = rxr->rx_prod;
2863 /* Memory barrier necessary as speculative reads of the rx
2864 * buffer can be ahead of the index in the status block
2867 while (sw_cons != hw_cons) {
2868 unsigned int len, hdr_len;
2870 struct sw_bd *rx_buf;
2871 struct sk_buff *skb;
2872 dma_addr_t dma_addr;
2874 sw_ring_cons = RX_RING_IDX(sw_cons);
2875 sw_ring_prod = RX_RING_IDX(sw_prod);
2877 rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
2882 dma_addr = pci_unmap_addr(rx_buf, mapping);
2884 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
2885 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
2886 PCI_DMA_FROMDEVICE);
2888 rx_hdr = (struct l2_fhdr *) skb->data;
2889 len = rx_hdr->l2_fhdr_pkt_len;
2891 if ((status = rx_hdr->l2_fhdr_status) &
2892 (L2_FHDR_ERRORS_BAD_CRC |
2893 L2_FHDR_ERRORS_PHY_DECODE |
2894 L2_FHDR_ERRORS_ALIGNMENT |
2895 L2_FHDR_ERRORS_TOO_SHORT |
2896 L2_FHDR_ERRORS_GIANT_FRAME)) {
2898 bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
2903 if (status & L2_FHDR_STATUS_SPLIT) {
2904 hdr_len = rx_hdr->l2_fhdr_ip_xsum;
2906 } else if (len > bp->rx_jumbo_thresh) {
2907 hdr_len = bp->rx_jumbo_thresh;
2913 if (len <= bp->rx_copy_thresh) {
2914 struct sk_buff *new_skb;
2916 new_skb = netdev_alloc_skb(bp->dev, len + 2);
2917 if (new_skb == NULL) {
2918 bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
2924 skb_copy_from_linear_data_offset(skb,
2926 new_skb->data, len + 2);
2927 skb_reserve(new_skb, 2);
2928 skb_put(new_skb, len);
2930 bnx2_reuse_rx_skb(bp, rxr, skb,
2931 sw_ring_cons, sw_ring_prod);
2934 } else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
2935 dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
2938 skb->protocol = eth_type_trans(skb, bp->dev);
2940 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
2941 (ntohs(skb->protocol) != 0x8100)) {
2948 skb->ip_summed = CHECKSUM_NONE;
2950 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
2951 L2_FHDR_STATUS_UDP_DATAGRAM))) {
2953 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
2954 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
2955 skb->ip_summed = CHECKSUM_UNNECESSARY;
2959 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && bp->vlgrp) {
2960 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
2961 rx_hdr->l2_fhdr_vlan_tag);
2965 netif_receive_skb(skb);
2967 bp->dev->last_rx = jiffies;
2971 sw_cons = NEXT_RX_BD(sw_cons);
2972 sw_prod = NEXT_RX_BD(sw_prod);
2974 if ((rx_pkt == budget))
2977 /* Refresh hw_cons to see if there is new work */
2978 if (sw_cons == hw_cons) {
2979 hw_cons = bnx2_get_hw_rx_cons(bnapi);
2983 rxr->rx_cons = sw_cons;
2984 rxr->rx_prod = sw_prod;
2987 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
2989 REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);
2991 REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
2999 /* MSI ISR - The only difference between this and the INTx ISR
3000 * is that the MSI interrupt is always serviced.
3003 bnx2_msi(int irq, void *dev_instance)
3005 struct bnx2_napi *bnapi = dev_instance;
3006 struct bnx2 *bp = bnapi->bp;
3007 struct net_device *dev = bp->dev;
3009 prefetch(bnapi->status_blk.msi);
3010 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3011 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3012 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3014 /* Return here if interrupt is disabled. */
3015 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3018 netif_rx_schedule(dev, &bnapi->napi);
3024 bnx2_msi_1shot(int irq, void *dev_instance)
3026 struct bnx2_napi *bnapi = dev_instance;
3027 struct bnx2 *bp = bnapi->bp;
3028 struct net_device *dev = bp->dev;
3030 prefetch(bnapi->status_blk.msi);
3032 /* Return here if interrupt is disabled. */
3033 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3036 netif_rx_schedule(dev, &bnapi->napi);
3042 bnx2_interrupt(int irq, void *dev_instance)
3044 struct bnx2_napi *bnapi = dev_instance;
3045 struct bnx2 *bp = bnapi->bp;
3046 struct net_device *dev = bp->dev;
3047 struct status_block *sblk = bnapi->status_blk.msi;
3049 /* When using INTx, it is possible for the interrupt to arrive
3050 * at the CPU before the status block posted prior to the
3051 * interrupt. Reading a register will flush the status block.
3052 * When using MSI, the MSI message will always complete after
3053 * the status block write.
3055 if ((sblk->status_idx == bnapi->last_status_idx) &&
3056 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
3057 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
3060 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3061 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3062 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3064 /* Read back to deassert IRQ immediately to avoid too many
3065 * spurious interrupts.
3067 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
3069 /* Return here if interrupt is shared and is disabled. */
3070 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3073 if (netif_rx_schedule_prep(dev, &bnapi->napi)) {
3074 bnapi->last_status_idx = sblk->status_idx;
3075 __netif_rx_schedule(dev, &bnapi->napi);
3082 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3084 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3085 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3087 if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3088 (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3093 #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
3094 STATUS_ATTN_BITS_TIMER_ABORT)
3097 bnx2_has_work(struct bnx2_napi *bnapi)
3099 struct status_block *sblk = bnapi->status_blk.msi;
3101 if (bnx2_has_fast_work(bnapi))
3104 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3105 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3111 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3113 struct status_block *sblk = bnapi->status_blk.msi;
3114 u32 status_attn_bits = sblk->status_attn_bits;
3115 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3117 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3118 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3120 bnx2_phy_int(bp, bnapi);
3122 /* This is needed to take care of transient status
3123 * during link changes.
3125 REG_WR(bp, BNX2_HC_COMMAND,
3126 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3127 REG_RD(bp, BNX2_HC_COMMAND);
3131 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3132 int work_done, int budget)
3134 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3135 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3137 if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3138 bnx2_tx_int(bp, bnapi, 0);
3140 if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3141 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3146 static int bnx2_poll_msix(struct napi_struct *napi, int budget)
3148 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3149 struct bnx2 *bp = bnapi->bp;
3151 struct status_block_msix *sblk = bnapi->status_blk.msix;
3154 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3155 if (unlikely(work_done >= budget))
3158 bnapi->last_status_idx = sblk->status_idx;
3159 /* status idx must be read before checking for more work. */
3161 if (likely(!bnx2_has_fast_work(bnapi))) {
3163 netif_rx_complete(bp->dev, napi);
3164 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3165 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3166 bnapi->last_status_idx);
3173 static int bnx2_poll(struct napi_struct *napi, int budget)
3175 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3176 struct bnx2 *bp = bnapi->bp;
3178 struct status_block *sblk = bnapi->status_blk.msi;
3181 bnx2_poll_link(bp, bnapi);
3183 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3185 if (unlikely(work_done >= budget))
3188 /* bnapi->last_status_idx is used below to tell the hw how
3189 * much work has been processed, so we must read it before
3190 * checking for more work.
3192 bnapi->last_status_idx = sblk->status_idx;
3194 if (likely(!bnx2_has_work(bnapi))) {
3195 netif_rx_complete(bp->dev, napi);
3196 if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3197 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3198 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3199 bnapi->last_status_idx);
3202 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3203 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3204 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3205 bnapi->last_status_idx);
3207 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3208 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3209 bnapi->last_status_idx);
3217 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3218 * from set_multicast.
3221 bnx2_set_rx_mode(struct net_device *dev)
3223 struct bnx2 *bp = netdev_priv(dev);
3224 u32 rx_mode, sort_mode;
3225 struct dev_addr_list *uc_ptr;
3228 spin_lock_bh(&bp->phy_lock);
3230 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3231 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3232 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3234 if (!bp->vlgrp && !(bp->flags & BNX2_FLAG_ASF_ENABLE))
3235 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3237 if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
3238 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3240 if (dev->flags & IFF_PROMISC) {
3241 /* Promiscuous mode. */
3242 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3243 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3244 BNX2_RPM_SORT_USER0_PROM_VLAN;
3246 else if (dev->flags & IFF_ALLMULTI) {
3247 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3248 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3251 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3254 /* Accept one or more multicast addresses. */
3255 struct dev_mc_list *mclist;
3256 u32 mc_filter[NUM_MC_HASH_REGISTERS];
3261 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3263 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
3264 i++, mclist = mclist->next) {
3266 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
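/* The hash bit is derived from the low byte of the little-endian CRC:
 * bits 7:5 pick one of the eight 32-bit MULTICAST_HASH registers (regidx)
 * and the low five bits pick the bit within that register.
 */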
3268 regidx = (bit & 0xe0) >> 5;
3270 mc_filter[regidx] |= (1 << bit);
3273 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3274 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3278 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3282 if (dev->uc_count > BNX2_MAX_UNICAST_ADDRESSES) {
3283 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3284 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3285 BNX2_RPM_SORT_USER0_PROM_VLAN;
3286 } else if (!(dev->flags & IFF_PROMISC)) {
3287 uc_ptr = dev->uc_list;
3289 /* Add all entries to the match filter list */
3290 for (i = 0; i < dev->uc_count; i++) {
3291 bnx2_set_mac_addr(bp, uc_ptr->da_addr,
3292 i + BNX2_START_UNICAST_ADDRESS_INDEX);
3294 (i + BNX2_START_UNICAST_ADDRESS_INDEX));
3295 uc_ptr = uc_ptr->next;
3300 if (rx_mode != bp->rx_mode) {
3301 bp->rx_mode = rx_mode;
3302 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3305 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3306 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3307 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3309 spin_unlock_bh(&bp->phy_lock);
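/* Load an RV2P firmware image.  Instructions are written two 32-bit words at
 * a time through the INSTR_HIGH/INSTR_LOW registers and committed to
 * instruction RAM via the per-processor ADDR_CMD register; the processor is
 * then held in reset until it is un-stalled later in initialization.  On the
 * 5709, one PROC2 instruction is patched first to set the BD page size.
 */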
3313 load_rv2p_fw(struct bnx2 *bp, __le32 *rv2p_code, u32 rv2p_code_len,
3319 if (rv2p_proc == RV2P_PROC2 && CHIP_NUM(bp) == CHIP_NUM_5709) {
3320 val = le32_to_cpu(rv2p_code[XI_RV2P_PROC2_MAX_BD_PAGE_LOC]);
3321 val &= ~XI_RV2P_PROC2_BD_PAGE_SIZE_MSK;
3322 val |= XI_RV2P_PROC2_BD_PAGE_SIZE;
3323 rv2p_code[XI_RV2P_PROC2_MAX_BD_PAGE_LOC] = cpu_to_le32(val);
3326 for (i = 0; i < rv2p_code_len; i += 8) {
3327 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, le32_to_cpu(*rv2p_code));
3329 REG_WR(bp, BNX2_RV2P_INSTR_LOW, le32_to_cpu(*rv2p_code));
3332 if (rv2p_proc == RV2P_PROC1) {
3333 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3334 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
3337 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3338 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
3342 /* Reset the processor; it is un-stalled later. */
3343 if (rv2p_proc == RV2P_PROC1) {
3344 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3347 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
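/* Load firmware onto one of the on-chip RISC processors.  Sequence visible
 * below: halt the CPU and clear its state, copy the (zlib-compressed) text
 * section plus the data, sbss, bss and read-only sections into the
 * processor's scratchpad through indirect register writes, clear the
 * prefetch instruction, point the PC at the entry address, and finally clear
 * the halt bit to start the CPU.
 */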
3352 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg, struct fw_info *fw)
3359 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3360 val |= cpu_reg->mode_value_halt;
3361 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3362 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3364 /* Load the Text area. */
3365 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
3369 rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
3374 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
3375 bnx2_reg_wr_ind(bp, offset, le32_to_cpu(fw->text[j]));
3379 /* Load the Data area. */
3380 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
3384 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
3385 bnx2_reg_wr_ind(bp, offset, fw->data[j]);
3389 /* Load the SBSS area. */
3390 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
3394 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
3395 bnx2_reg_wr_ind(bp, offset, 0);
3399 /* Load the BSS area. */
3400 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
3404 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
3405 bnx2_reg_wr_ind(bp, offset, 0);
3409 /* Load the Read-Only area. */
3410 offset = cpu_reg->spad_base +
3411 (fw->rodata_addr - cpu_reg->mips_view_base);
3415 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
3416 bnx2_reg_wr_ind(bp, offset, fw->rodata[j]);
3420 /* Clear the pre-fetch instruction. */
3421 bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3422 bnx2_reg_wr_ind(bp, cpu_reg->pc, fw->start_addr);
3424 /* Start the CPU. */
3425 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3426 val &= ~cpu_reg->mode_value_halt;
3427 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3428 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3434 bnx2_init_cpus(struct bnx2 *bp)
3440 /* Initialize the RV2P processor. */
3441 text = vmalloc(FW_BUF_SIZE);
3444 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3445 rv2p = bnx2_xi_rv2p_proc1;
3446 rv2p_len = sizeof(bnx2_xi_rv2p_proc1);
3448 rv2p = bnx2_rv2p_proc1;
3449 rv2p_len = sizeof(bnx2_rv2p_proc1);
3451 rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3455 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);
3457 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3458 rv2p = bnx2_xi_rv2p_proc2;
3459 rv2p_len = sizeof(bnx2_xi_rv2p_proc2);
3461 rv2p = bnx2_rv2p_proc2;
3462 rv2p_len = sizeof(bnx2_rv2p_proc2);
3464 rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3468 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);
3470 /* Initialize the RX Processor. */
3471 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3472 fw = &bnx2_rxp_fw_09;
3474 fw = &bnx2_rxp_fw_06;
3477 rc = load_cpu_fw(bp, &cpu_reg_rxp, fw);
3481 /* Initialize the TX Processor. */
3482 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3483 fw = &bnx2_txp_fw_09;
3485 fw = &bnx2_txp_fw_06;
3488 rc = load_cpu_fw(bp, &cpu_reg_txp, fw);
3492 /* Initialize the TX Patch-up Processor. */
3493 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3494 fw = &bnx2_tpat_fw_09;
3496 fw = &bnx2_tpat_fw_06;
3499 rc = load_cpu_fw(bp, &cpu_reg_tpat, fw);
3503 /* Initialize the Completion Processor. */
3504 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3505 fw = &bnx2_com_fw_09;
3507 fw = &bnx2_com_fw_06;
3510 rc = load_cpu_fw(bp, &cpu_reg_com, fw);
3514 /* Initialize the Command Processor. */
3515 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3516 fw = &bnx2_cp_fw_09;
3518 fw = &bnx2_cp_fw_06;
3521 rc = load_cpu_fw(bp, &cpu_reg_cp, fw);
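/* Power management / Wake-on-LAN setup (sketch of the flow below): when
 * suspending with WoL enabled, the link is renegotiated at 10/100 on copper
 * ports, the EMAC is configured to recognize magic and ACPI packets, the
 * receive sort rules are opened up for broadcast/multicast, the EMAC and RPM
 * blocks are left enabled, and the firmware is told via bnx2_fw_sync()
 * whether WoL is armed before the PMCSR is reprogrammed for the low-power
 * state.
 */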
3529 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3533 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3539 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3540 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3541 PCI_PM_CTRL_PME_STATUS);
3543 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3544 /* delay required during transition out of D3hot */
3547 val = REG_RD(bp, BNX2_EMAC_MODE);
3548 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3549 val &= ~BNX2_EMAC_MODE_MPKT;
3550 REG_WR(bp, BNX2_EMAC_MODE, val);
3552 val = REG_RD(bp, BNX2_RPM_CONFIG);
3553 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3554 REG_WR(bp, BNX2_RPM_CONFIG, val);
3565 autoneg = bp->autoneg;
3566 advertising = bp->advertising;
3568 if (bp->phy_port == PORT_TP) {
3569 bp->autoneg = AUTONEG_SPEED;
3570 bp->advertising = ADVERTISED_10baseT_Half |
3571 ADVERTISED_10baseT_Full |
3572 ADVERTISED_100baseT_Half |
3573 ADVERTISED_100baseT_Full |
3577 spin_lock_bh(&bp->phy_lock);
3578 bnx2_setup_phy(bp, bp->phy_port);
3579 spin_unlock_bh(&bp->phy_lock);
3581 bp->autoneg = autoneg;
3582 bp->advertising = advertising;
3584 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
3586 val = REG_RD(bp, BNX2_EMAC_MODE);
3588 /* Enable port mode. */
3589 val &= ~BNX2_EMAC_MODE_PORT;
3590 val |= BNX2_EMAC_MODE_MPKT_RCVD |
3591 BNX2_EMAC_MODE_ACPI_RCVD |
3592 BNX2_EMAC_MODE_MPKT;
3593 if (bp->phy_port == PORT_TP)
3594 val |= BNX2_EMAC_MODE_PORT_MII;
3596 val |= BNX2_EMAC_MODE_PORT_GMII;
3597 if (bp->line_speed == SPEED_2500)
3598 val |= BNX2_EMAC_MODE_25G_MODE;
3601 REG_WR(bp, BNX2_EMAC_MODE, val);
3603 /* receive all multicast */
3604 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3605 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3608 REG_WR(bp, BNX2_EMAC_RX_MODE,
3609 BNX2_EMAC_RX_MODE_SORT_MODE);
3611 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3612 BNX2_RPM_SORT_USER0_MC_EN;
3613 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3614 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3615 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3616 BNX2_RPM_SORT_USER0_ENA);
3618 /* Need to enable EMAC and RPM for WOL. */
3619 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3620 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3621 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3622 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3624 val = REG_RD(bp, BNX2_RPM_CONFIG);
3625 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3626 REG_WR(bp, BNX2_RPM_CONFIG, val);
3628 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3631 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3634 if (!(bp->flags & BNX2_FLAG_NO_WOL))
3635 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
3638 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3639 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3640 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3649 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3651 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3654 /* No more memory access after this point until
3655 * device is brought back to D0.
3667 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3672 /* Request access to the flash interface. */
3673 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3674 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3675 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3676 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3682 if (j >= NVRAM_TIMEOUT_COUNT)
3689 bnx2_release_nvram_lock(struct bnx2 *bp)
3694 /* Relinquish nvram interface. */
3695 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3697 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3698 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3699 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3705 if (j >= NVRAM_TIMEOUT_COUNT)
3713 bnx2_enable_nvram_write(struct bnx2 *bp)
3717 val = REG_RD(bp, BNX2_MISC_CFG);
3718 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3720 if (bp->flash_info->flags & BNX2_NV_WREN) {
3723 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3724 REG_WR(bp, BNX2_NVM_COMMAND,
3725 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3727 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3730 val = REG_RD(bp, BNX2_NVM_COMMAND);
3731 if (val & BNX2_NVM_COMMAND_DONE)
3735 if (j >= NVRAM_TIMEOUT_COUNT)
3742 bnx2_disable_nvram_write(struct bnx2 *bp)
3746 val = REG_RD(bp, BNX2_MISC_CFG);
3747 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3752 bnx2_enable_nvram_access(struct bnx2 *bp)
3756 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3757 /* Enable both bits, even on read. */
3758 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3759 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3763 bnx2_disable_nvram_access(struct bnx2 *bp)
3767 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3768 /* Disable both bits, even after read. */
3769 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3770 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3771 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3775 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3780 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
3781 /* Buffered flash, no erase needed */
3784 /* Build an erase command */
3785 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3786 BNX2_NVM_COMMAND_DOIT;
3788 /* Need to clear DONE bit separately. */
3789 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3791 /* Address of the NVRAM page to erase. */
3792 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3794 /* Issue an erase command. */
3795 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3797 /* Wait for completion. */
3798 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3803 val = REG_RD(bp, BNX2_NVM_COMMAND);
3804 if (val & BNX2_NVM_COMMAND_DONE)
3808 if (j >= NVRAM_TIMEOUT_COUNT)
3815 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3820 /* Build the command word. */
3821 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3823 /* Translate the offset into buffered-flash page addressing; not needed for 5709. */
3824 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3825 offset = ((offset / bp->flash_info->page_size) <<
3826 bp->flash_info->page_bits) +
3827 (offset % bp->flash_info->page_size);
3830 /* Need to clear DONE bit separately. */
3831 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3833 /* Address of the NVRAM to read from. */
3834 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3836 /* Issue a read command. */
3837 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3839 /* Wait for completion. */
3840 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3845 val = REG_RD(bp, BNX2_NVM_COMMAND);
3846 if (val & BNX2_NVM_COMMAND_DONE) {
3847 __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
3848 memcpy(ret_val, &v, 4);
3852 if (j >= NVRAM_TIMEOUT_COUNT)
3860 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3866 /* Build the command word. */
3867 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3869 /* Translate the offset into buffered-flash page addressing; not needed for 5709. */
3870 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3871 offset = ((offset / bp->flash_info->page_size) <<
3872 bp->flash_info->page_bits) +
3873 (offset % bp->flash_info->page_size);
3876 /* Need to clear DONE bit separately. */
3877 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3879 memcpy(&val32, val, 4);
3881 /* Write the data. */
3882 REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
3884 /* Address of the NVRAM to write to. */
3885 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3887 /* Issue the write command. */
3888 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3890 /* Wait for completion. */
3891 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3894 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3897 if (j >= NVRAM_TIMEOUT_COUNT)
3904 bnx2_init_nvram(struct bnx2 *bp)
3907 int j, entry_count, rc = 0;
3908 struct flash_spec *flash;
3910 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3911 bp->flash_info = &flash_5709;
3912 goto get_flash_size;
3915 /* Determine the selected interface. */
3916 val = REG_RD(bp, BNX2_NVM_CFG1);
3918 entry_count = ARRAY_SIZE(flash_table);
3920 if (val & 0x40000000) {
3922 /* Flash interface has been reconfigured */
3923 for (j = 0, flash = &flash_table[0]; j < entry_count;
3925 if ((val & FLASH_BACKUP_STRAP_MASK) ==
3926 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
3927 bp->flash_info = flash;
3934 /* Not yet reconfigured */
3936 if (val & (1 << 23))
3937 mask = FLASH_BACKUP_STRAP_MASK;
3939 mask = FLASH_STRAP_MASK;
3941 for (j = 0, flash = &flash_table[0]; j < entry_count;
3944 if ((val & mask) == (flash->strapping & mask)) {
3945 bp->flash_info = flash;
3947 /* Request access to the flash interface. */
3948 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3951 /* Enable access to flash interface */
3952 bnx2_enable_nvram_access(bp);
3954 /* Reconfigure the flash interface */
3955 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
3956 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
3957 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
3958 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
3960 /* Disable access to flash interface */
3961 bnx2_disable_nvram_access(bp);
3962 bnx2_release_nvram_lock(bp);
3967 } /* if (val & 0x40000000) */
3969 if (j == entry_count) {
3970 bp->flash_info = NULL;
3971 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
3976 val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
3977 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
3979 bp->flash_size = val;
3981 bp->flash_size = bp->flash_info->total_size;
3987 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3991 u32 cmd_flags, offset32, len32, extra;
3996 /* Request access to the flash interface. */
3997 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4000 /* Enable access to flash interface */
4001 bnx2_enable_nvram_access(bp);
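/* NVRAM is read in aligned 4-byte units.  An unaligned start (pre_len) and
 * an unaligned tail (extra) are handled by reading the surrounding dword
 * into a scratch buffer and copying out only the requested bytes; aligned
 * middle dwords are read straight into ret_buf, with the FIRST/LAST command
 * flags marking the boundaries of the burst.
 */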
4014 pre_len = 4 - (offset & 3);
4016 if (pre_len >= len32) {
4018 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4019 BNX2_NVM_COMMAND_LAST;
4022 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4025 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4030 memcpy(ret_buf, buf + (offset & 3), pre_len);
4037 extra = 4 - (len32 & 3);
4038 len32 = (len32 + 4) & ~3;
4045 cmd_flags = BNX2_NVM_COMMAND_LAST;
4047 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4048 BNX2_NVM_COMMAND_LAST;
4050 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4052 memcpy(ret_buf, buf, 4 - extra);
4054 else if (len32 > 0) {
4057 /* Read the first word. */
4061 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4063 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4065 /* Advance to the next dword. */
4070 while (len32 > 4 && rc == 0) {
4071 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4073 /* Advance to the next dword. */
4082 cmd_flags = BNX2_NVM_COMMAND_LAST;
4083 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4085 memcpy(ret_buf, buf, 4 - extra);
4088 /* Disable access to flash interface */
4089 bnx2_disable_nvram_access(bp);
4091 bnx2_release_nvram_lock(bp);
4097 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4100 u32 written, offset32, len32;
4101 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4103 int align_start, align_end;
4108 align_start = align_end = 0;
4110 if ((align_start = (offset32 & 3))) {
4112 len32 += align_start;
4115 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4120 align_end = 4 - (len32 & 3);
4122 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4126 if (align_start || align_end) {
4127 align_buf = kmalloc(len32, GFP_KERNEL);
4128 if (align_buf == NULL)
4131 memcpy(align_buf, start, 4);
4134 memcpy(align_buf + len32 - 4, end, 4);
4136 memcpy(align_buf + align_start, data_buf, buf_size);
4140 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4141 flash_buffer = kmalloc(264, GFP_KERNEL);
4142 if (flash_buffer == NULL) {
4144 goto nvram_write_end;
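/* Writes are performed one flash page at a time as a read-modify-write:
 * for non-buffered flash the whole page is first read into flash_buffer,
 * the page is erased, the old bytes before data_start are written back,
 * then the new data, then the old bytes after data_end.  The NVRAM lock and
 * write-enable are acquired and released around each page.
 */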
4149 while ((written < len32) && (rc == 0)) {
4150 u32 page_start, page_end, data_start, data_end;
4151 u32 addr, cmd_flags;
4154 /* Find the page_start addr */
4155 page_start = offset32 + written;
4156 page_start -= (page_start % bp->flash_info->page_size);
4157 /* Find the page_end addr */
4158 page_end = page_start + bp->flash_info->page_size;
4159 /* Find the data_start addr */
4160 data_start = (written == 0) ? offset32 : page_start;
4161 /* Find the data_end addr */
4162 data_end = (page_end > offset32 + len32) ?
4163 (offset32 + len32) : page_end;
4165 /* Request access to the flash interface. */
4166 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4167 goto nvram_write_end;
4169 /* Enable access to flash interface */
4170 bnx2_enable_nvram_access(bp);
4172 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4173 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4176 /* Read the whole page into the buffer
4177 * (non-buffered flash only) */
4178 for (j = 0; j < bp->flash_info->page_size; j += 4) {
4179 if (j == (bp->flash_info->page_size - 4)) {
4180 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4182 rc = bnx2_nvram_read_dword(bp,
4188 goto nvram_write_end;
4194 /* Enable writes to flash interface (unlock write-protect) */
4195 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4196 goto nvram_write_end;
4198 /* Loop to write back the buffer data from page_start to
4201 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4202 /* Erase the page */
4203 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4204 goto nvram_write_end;
4206 /* Re-enable writes for the actual write */
4207 bnx2_enable_nvram_write(bp);
4209 for (addr = page_start; addr < data_start;
4210 addr += 4, i += 4) {
4212 rc = bnx2_nvram_write_dword(bp, addr,
4213 &flash_buffer[i], cmd_flags);
4216 goto nvram_write_end;
4222 /* Loop to write the new data from data_start to data_end */
4223 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4224 if ((addr == page_end - 4) ||
4225 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4226 (addr == data_end - 4))) {
4228 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4230 rc = bnx2_nvram_write_dword(bp, addr, buf,
4234 goto nvram_write_end;
4240 /* Loop to write back the buffer data from data_end
4242 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4243 for (addr = data_end; addr < page_end;
4244 addr += 4, i += 4) {
4246 if (addr == page_end-4) {
4247 cmd_flags = BNX2_NVM_COMMAND_LAST;
4249 rc = bnx2_nvram_write_dword(bp, addr,
4250 &flash_buffer[i], cmd_flags);
4253 goto nvram_write_end;
4259 /* Disable writes to flash interface (lock write-protect) */
4260 bnx2_disable_nvram_write(bp);
4262 /* Disable access to flash interface */
4263 bnx2_disable_nvram_access(bp);
4264 bnx2_release_nvram_lock(bp);
4266 /* Increment written */
4267 written += data_end - data_start;
4271 kfree(flash_buffer);
4277 bnx2_init_fw_cap(struct bnx2 *bp)
4281 bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4282 bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4284 if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4285 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4287 val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4288 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4291 if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4292 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4293 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4296 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4297 (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4300 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4302 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4303 if (link & BNX2_LINK_STATUS_SERDES_LINK)
4304 bp->phy_port = PORT_FIBRE;
4306 bp->phy_port = PORT_TP;
4308 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4309 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4312 if (netif_running(bp->dev) && sig)
4313 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4317 bnx2_setup_msix_tbl(struct bnx2 *bp)
4319 REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4321 REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4322 REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4326 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4332 /* Wait for the current PCI transaction to complete before
4333 * issuing a reset. */
4334 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4335 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4336 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4337 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4338 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4339 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4342 /* Wait for the firmware to tell us it is ok to issue a reset. */
4343 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
4345 /* Deposit a driver reset signature so the firmware knows that
4346 * this is a soft reset. */
4347 bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4348 BNX2_DRV_RESET_SIGNATURE_MAGIC);
4350 /* Do a dummy read to force the chip to complete all current transactions
4351 * before we issue a reset. */
4352 val = REG_RD(bp, BNX2_MISC_ID);
4354 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4355 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4356 REG_RD(bp, BNX2_MISC_COMMAND);
4359 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4360 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4362 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
4365 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4366 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4367 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4370 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4372 /* Reading back any register after chip reset will hang the
4373 * bus on 5706 A0 and A1. The msleep below provides plenty
4374 * of margin for write posting.
4376 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
4377 (CHIP_ID(bp) == CHIP_ID_5706_A1))
4380 /* Reset takes approximately 30 usec */
4381 for (i = 0; i < 10; i++) {
4382 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4383 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4384 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4389 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4390 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4391 printk(KERN_ERR PFX "Chip reset did not complete\n");
4396 /* Make sure byte swapping is properly configured. */
4397 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
4398 if (val != 0x01020304) {
4399 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
4403 /* Wait for the firmware to finish its initialization. */
4404 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
4408 spin_lock_bh(&bp->phy_lock);
4409 old_port = bp->phy_port;
4410 bnx2_init_fw_cap(bp);
4411 if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4412 old_port != bp->phy_port)
4413 bnx2_set_default_remote_link(bp);
4414 spin_unlock_bh(&bp->phy_lock);
4416 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4417 /* Adjust the voltage regulator two steps lower. The default
4418 * of this register is 0x0000000e. */
4419 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4421 /* Remove bad rbuf memory from the free pool. */
4422 rc = bnx2_alloc_bad_rbuf(bp);
4425 if (bp->flags & BNX2_FLAG_USING_MSIX)
4426 bnx2_setup_msix_tbl(bp);
4432 bnx2_init_chip(struct bnx2 *bp)
4437 /* Make sure the interrupt is not active. */
4438 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4440 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4441 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4443 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4445 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4446 DMA_READ_CHANS << 12 |
4447 DMA_WRITE_CHANS << 16;
4449 val |= (0x2 << 20) | (1 << 11);
4451 if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4454 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4455 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
4456 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4458 REG_WR(bp, BNX2_DMA_CONFIG, val);
4460 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4461 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4462 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4463 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4466 if (bp->flags & BNX2_FLAG_PCIX) {
4469 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4471 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4472 val16 & ~PCI_X_CMD_ERO);
4475 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4476 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4477 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4478 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4480 /* Initialize context mapping and zero out the quick contexts. The
4481 * context block must have already been enabled. */
4482 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4483 rc = bnx2_init_5709_context(bp);
4487 bnx2_init_context(bp);
4489 if ((rc = bnx2_init_cpus(bp)) != 0)
4492 bnx2_init_nvram(bp);
4494 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4496 val = REG_RD(bp, BNX2_MQ_CONFIG);
4497 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4498 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4499 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
4500 val |= BNX2_MQ_CONFIG_HALT_DIS;
4502 REG_WR(bp, BNX2_MQ_CONFIG, val);
4504 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4505 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4506 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4508 val = (BCM_PAGE_BITS - 8) << 24;
4509 REG_WR(bp, BNX2_RV2P_CONFIG, val);
4511 /* Configure page size. */
4512 val = REG_RD(bp, BNX2_TBDR_CONFIG);
4513 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4514 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4515 REG_WR(bp, BNX2_TBDR_CONFIG, val);
4517 val = bp->mac_addr[0] +
4518 (bp->mac_addr[1] << 8) +
4519 (bp->mac_addr[2] << 16) +
4521 (bp->mac_addr[4] << 8) +
4522 (bp->mac_addr[5] << 16);
4523 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4525 /* Program the MTU. Also include 4 bytes for CRC32. */
4526 val = bp->dev->mtu + ETH_HLEN + 4;
4527 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4528 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4529 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4531 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4532 bp->bnx2_napi[i].last_status_idx = 0;
4534 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4536 /* Set up how to generate a link change interrupt. */
4537 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4539 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4540 (u64) bp->status_blk_mapping & 0xffffffff);
4541 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4543 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4544 (u64) bp->stats_blk_mapping & 0xffffffff);
4545 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4546 (u64) bp->stats_blk_mapping >> 32);
4548 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4549 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4551 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4552 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4554 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4555 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4557 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4559 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4561 REG_WR(bp, BNX2_HC_COM_TICKS,
4562 (bp->com_ticks_int << 16) | bp->com_ticks);
4564 REG_WR(bp, BNX2_HC_CMD_TICKS,
4565 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4567 if (CHIP_NUM(bp) == CHIP_NUM_5708)
4568 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4570 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4571 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4573 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4574 val = BNX2_HC_CONFIG_COLLECT_STATS;
4576 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4577 BNX2_HC_CONFIG_COLLECT_STATS;
4580 if (bp->irq_nvecs > 1) {
4581 REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
4582 BNX2_HC_MSIX_BIT_VECTOR_VAL);
4584 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
4587 if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
4588 val |= BNX2_HC_CONFIG_ONE_SHOT;
4590 REG_WR(bp, BNX2_HC_CONFIG, val);
4592 for (i = 1; i < bp->irq_nvecs; i++) {
4593 u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4594 BNX2_HC_SB_CONFIG_1;
4597 BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
4598 BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
4599 BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4601 REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
4602 (bp->tx_quick_cons_trip_int << 16) |
4603 bp->tx_quick_cons_trip);
4605 REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
4606 (bp->tx_ticks_int << 16) | bp->tx_ticks);
4608 REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
4609 (bp->rx_quick_cons_trip_int << 16) |
4610 bp->rx_quick_cons_trip);
4612 REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
4613 (bp->rx_ticks_int << 16) | bp->rx_ticks);
4616 /* Clear internal stats counters. */
4617 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4619 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
4621 /* Initialize the receive filter. */
4622 bnx2_set_rx_mode(bp->dev);
4624 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4625 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4626 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4627 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4629 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
4632 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
4633 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
4637 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
4643 bnx2_clear_ring_states(struct bnx2 *bp)
4645 struct bnx2_napi *bnapi;
4646 struct bnx2_tx_ring_info *txr;
4647 struct bnx2_rx_ring_info *rxr;
4650 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
4651 bnapi = &bp->bnx2_napi[i];
4652 txr = &bnapi->tx_ring;
4653 rxr = &bnapi->rx_ring;
4656 txr->hw_tx_cons = 0;
4657 rxr->rx_prod_bseq = 0;
4660 rxr->rx_pg_prod = 0;
4661 rxr->rx_pg_cons = 0;
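/* Program the L2 TX context for a ring.  The context field offsets differ
 * between the 5709 (XI offsets) and earlier chips, but the contents are the
 * same: context type/size, command type, and the 64-bit host address of the
 * TX buffer descriptor ring split into HI/LO words.
 */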
4666 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
4668 u32 val, offset0, offset1, offset2, offset3;
4669 u32 cid_addr = GET_CID_ADDR(cid);
4671 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4672 offset0 = BNX2_L2CTX_TYPE_XI;
4673 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4674 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4675 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4677 offset0 = BNX2_L2CTX_TYPE;
4678 offset1 = BNX2_L2CTX_CMD_TYPE;
4679 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4680 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4682 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4683 bnx2_ctx_wr(bp, cid_addr, offset0, val);
4685 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4686 bnx2_ctx_wr(bp, cid_addr, offset1, val);
4688 val = (u64) txr->tx_desc_mapping >> 32;
4689 bnx2_ctx_wr(bp, cid_addr, offset2, val);
4691 val = (u64) txr->tx_desc_mapping & 0xffffffff;
4692 bnx2_ctx_wr(bp, cid_addr, offset3, val);
4696 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
4700 struct bnx2_napi *bnapi;
4701 struct bnx2_tx_ring_info *txr;
4703 bnapi = &bp->bnx2_napi[ring_num];
4704 txr = &bnapi->tx_ring;
4709 cid = TX_TSS_CID + ring_num - 1;
4711 bp->tx_wake_thresh = bp->tx_ring_size / 2;
4713 txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
4715 txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
4716 txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
4719 txr->tx_prod_bseq = 0;
4721 txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4722 txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4724 bnx2_init_tx_context(bp, cid, txr);
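/* Initialize one or more pages of RX buffer descriptors.  Every descriptor
 * is given the buffer size and START/END flags; the last descriptor of each
 * page is used as a chain entry whose host address points at the next page
 * (the last page presumably chains back to the first), which is how the
 * hardware walks a multi-page ring.
 */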
4728 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4734 for (i = 0; i < num_rings; i++) {
4737 rxbd = &rx_ring[i][0];
4738 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4739 rxbd->rx_bd_len = buf_size;
4740 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4742 if (i == (num_rings - 1))
4746 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4747 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
4752 bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
4755 u16 prod, ring_prod;
4756 u32 cid, rx_cid_addr, val;
4757 struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
4758 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
4763 cid = RX_RSS_CID + ring_num - 1;
4765 rx_cid_addr = GET_CID_ADDR(cid);
4767 bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
4768 bp->rx_buf_use_size, bp->rx_max_ring);
4770 bnx2_init_rx_context(bp, cid);
4772 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4773 val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
4774 REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
4777 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
4778 if (bp->rx_pg_ring_size) {
4779 bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
4780 rxr->rx_pg_desc_mapping,
4781 PAGE_SIZE, bp->rx_max_pg_ring);
4782 val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
4783 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
4784 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
4785 BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
4787 val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
4788 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
4790 val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
4791 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
4793 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4794 REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
4797 val = (u64) rxr->rx_desc_mapping[0] >> 32;
4798 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
4800 val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
4801 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
4803 ring_prod = prod = rxr->rx_pg_prod;
4804 for (i = 0; i < bp->rx_pg_ring_size; i++) {
4805 if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0)
4807 prod = NEXT_RX_BD(prod);
4808 ring_prod = RX_PG_RING_IDX(prod);
4810 rxr->rx_pg_prod = prod;
4812 ring_prod = prod = rxr->rx_prod;
4813 for (i = 0; i < bp->rx_ring_size; i++) {
4814 if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0)
4816 prod = NEXT_RX_BD(prod);
4817 ring_prod = RX_RING_IDX(prod);
4819 rxr->rx_prod = prod;
4821 rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
4822 rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
4823 rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
4825 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
4826 REG_WR16(bp, rxr->rx_bidx_addr, prod);
4828 REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
4832 bnx2_init_all_rings(struct bnx2 *bp)
4837 bnx2_clear_ring_states(bp);
4839 REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
4840 for (i = 0; i < bp->num_tx_rings; i++)
4841 bnx2_init_tx_ring(bp, i);
4843 if (bp->num_tx_rings > 1)
4844 REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
4847 REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
4848 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
4850 for (i = 0; i < bp->num_rx_rings; i++)
4851 bnx2_init_rx_ring(bp, i);
4853 if (bp->num_rx_rings > 1) {
4855 u8 *tbl = (u8 *) &tbl_32;
4857 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
4858 BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);
4860 for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
4861 tbl[i % 4] = i % (bp->num_rx_rings - 1);
4864 BNX2_RXP_SCRATCH_RSS_TBL + i,
4865 cpu_to_be32(tbl_32));
4868 val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
4869 BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
4871 REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
4876 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
4878 u32 max, num_rings = 1;
4880 while (ring_size > MAX_RX_DESC_CNT) {
4881 ring_size -= MAX_RX_DESC_CNT;
4884 /* round to next power of 2 */
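/* For example, assuming MAX_RX_DESC_CNT is 255 (a 4K page of 16-byte BDs
 * minus the chain entry), a requested ring_size of 1000 descriptors spans
 * four pages; 4 is already a power of two, so four rings are used, subject
 * to the max_size cap.
 */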
4886 while ((max & num_rings) == 0)
4889 if (num_rings != max)
4896 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4898 u32 rx_size, rx_space, jumbo_size;
4900 /* 8 for CRC and VLAN */
4901 rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
4903 rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
4904 sizeof(struct skb_shared_info);
4906 bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
4907 bp->rx_pg_ring_size = 0;
4908 bp->rx_max_pg_ring = 0;
4909 bp->rx_max_pg_ring_idx = 0;
4910 if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
4911 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
4913 jumbo_size = size * pages;
4914 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
4915 jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
4917 bp->rx_pg_ring_size = jumbo_size;
4918 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
4920 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
4921 rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
4922 bp->rx_copy_thresh = 0;
4925 bp->rx_buf_use_size = rx_size;
4927 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
4928 bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
4929 bp->rx_ring_size = size;
4930 bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
4931 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
4935 bnx2_free_tx_skbs(struct bnx2 *bp)
4939 for (i = 0; i < bp->num_tx_rings; i++) {
4940 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
4941 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
4944 if (txr->tx_buf_ring == NULL)
4947 for (j = 0; j < TX_DESC_CNT; ) {
4948 struct sw_bd *tx_buf = &txr->tx_buf_ring[j];
4949 struct sk_buff *skb = tx_buf->skb;
4957 pci_unmap_single(bp->pdev,
4958 pci_unmap_addr(tx_buf, mapping),
4959 skb_headlen(skb), PCI_DMA_TODEVICE);
4963 last = skb_shinfo(skb)->nr_frags;
4964 for (k = 0; k < last; k++) {
4965 tx_buf = &txr->tx_buf_ring[j + k + 1];
4966 pci_unmap_page(bp->pdev,
4967 pci_unmap_addr(tx_buf, mapping),
4968 skb_shinfo(skb)->frags[k].size,
4978 bnx2_free_rx_skbs(struct bnx2 *bp)
4982 for (i = 0; i < bp->num_rx_rings; i++) {
4983 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
4984 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
4987 if (rxr->rx_buf_ring == NULL)
4990 for (j = 0; j < bp->rx_max_ring_idx; j++) {
4991 struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
4992 struct sk_buff *skb = rx_buf->skb;
4997 pci_unmap_single(bp->pdev,
4998 pci_unmap_addr(rx_buf, mapping),
4999 bp->rx_buf_use_size,
5000 PCI_DMA_FROMDEVICE);
5006 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5007 bnx2_free_rx_page(bp, rxr, j);
5012 bnx2_free_skbs(struct bnx2 *bp)
5014 bnx2_free_tx_skbs(bp);
5015 bnx2_free_rx_skbs(bp);
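/* Bring the NIC back to a known state: reset the chip with the given
 * firmware reset code, reprogram it, and reinitialize all rings.
 */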
5019 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5023 rc = bnx2_reset_chip(bp, reset_code);
5028 if ((rc = bnx2_init_chip(bp)) != 0)
5031 bnx2_init_all_rings(bp);
5036 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5040 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5043 spin_lock_bh(&bp->phy_lock);
5044 bnx2_init_phy(bp, reset_phy);
5046 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5047 bnx2_remote_phy_event(bp);
5048 spin_unlock_bh(&bp->phy_lock);
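/* Register self test.  Each table entry lists an offset, a read/write
 * mask and a read-only mask: read/write bits must accept both 0 and 1
 * writes, read-only bits must keep their original value, and the saved
 * value is restored afterwards.  Entries flagged BNX2_FL_NOT_5709 are
 * skipped on 5709 chips.
 */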
5053 bnx2_test_registers(struct bnx2 *bp)
5057 static const struct {
5060 #define BNX2_FL_NOT_5709 1
5064 { 0x006c, 0, 0x00000000, 0x0000003f },
5065 { 0x0090, 0, 0xffffffff, 0x00000000 },
5066 { 0x0094, 0, 0x00000000, 0x00000000 },
5068 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
5069 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5070 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5071 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
5072 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
5073 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5074 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
5075 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5076 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5078 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5079 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5080 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5081 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5082 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5083 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5085 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5086 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
5087 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
5089 { 0x1000, 0, 0x00000000, 0x00000001 },
5090 { 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
5092 { 0x1408, 0, 0x01c00800, 0x00000000 },
5093 { 0x149c, 0, 0x8000ffff, 0x00000000 },
5094 { 0x14a8, 0, 0x00000000, 0x000001ff },
5095 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
5096 { 0x14b0, 0, 0x00000002, 0x00000001 },
5097 { 0x14b8, 0, 0x00000000, 0x00000000 },
5098 { 0x14c0, 0, 0x00000000, 0x00000009 },
5099 { 0x14c4, 0, 0x00003fff, 0x00000000 },
5100 { 0x14cc, 0, 0x00000000, 0x00000001 },
5101 { 0x14d0, 0, 0xffffffff, 0x00000000 },
5103 { 0x1800, 0, 0x00000000, 0x00000001 },
5104 { 0x1804, 0, 0x00000000, 0x00000003 },
5106 { 0x2800, 0, 0x00000000, 0x00000001 },
5107 { 0x2804, 0, 0x00000000, 0x00003f01 },
5108 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
5109 { 0x2810, 0, 0xffff0000, 0x00000000 },
5110 { 0x2814, 0, 0xffff0000, 0x00000000 },
5111 { 0x2818, 0, 0xffff0000, 0x00000000 },
5112 { 0x281c, 0, 0xffff0000, 0x00000000 },
5113 { 0x2834, 0, 0xffffffff, 0x00000000 },
5114 { 0x2840, 0, 0x00000000, 0xffffffff },
5115 { 0x2844, 0, 0x00000000, 0xffffffff },
5116 { 0x2848, 0, 0xffffffff, 0x00000000 },
5117 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
5119 { 0x2c00, 0, 0x00000000, 0x00000011 },
5120 { 0x2c04, 0, 0x00000000, 0x00030007 },
5122 { 0x3c00, 0, 0x00000000, 0x00000001 },
5123 { 0x3c04, 0, 0x00000000, 0x00070000 },
5124 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
5125 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
5126 { 0x3c10, 0, 0xffffffff, 0x00000000 },
5127 { 0x3c14, 0, 0x00000000, 0xffffffff },
5128 { 0x3c18, 0, 0x00000000, 0xffffffff },
5129 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
5130 { 0x3c20, 0, 0xffffff00, 0x00000000 },
5132 { 0x5004, 0, 0x00000000, 0x0000007f },
5133 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
5135 { 0x5c00, 0, 0x00000000, 0x00000001 },
5136 { 0x5c04, 0, 0x00000000, 0x0003000f },
5137 { 0x5c08, 0, 0x00000003, 0x00000000 },
5138 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
5139 { 0x5c10, 0, 0x00000000, 0xffffffff },
5140 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
5141 { 0x5c84, 0, 0x00000000, 0x0000f333 },
5142 { 0x5c88, 0, 0x00000000, 0x00077373 },
5143 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
5145 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
5146 { 0x680c, 0, 0xffffffff, 0x00000000 },
5147 { 0x6810, 0, 0xffffffff, 0x00000000 },
5148 { 0x6814, 0, 0xffffffff, 0x00000000 },
5149 { 0x6818, 0, 0xffffffff, 0x00000000 },
5150 { 0x681c, 0, 0xffffffff, 0x00000000 },
5151 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
5152 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
5153 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
5154 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
5155 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
5156 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
5157 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
5158 { 0x683c, 0, 0x0000ffff, 0x00000000 },
5159 { 0x6840, 0, 0x00000ff0, 0x00000000 },
5160 { 0x6844, 0, 0x00ffff00, 0x00000000 },
5161 { 0x684c, 0, 0xffffffff, 0x00000000 },
5162 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5163 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5164 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5165 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5166 { 0x6908, 0, 0x00000000, 0x0001ff0f },
5167 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5169 { 0xffff, 0, 0x00000000, 0x00000000 },
5174 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5177 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5178 u32 offset, rw_mask, ro_mask, save_val, val;
5179 u16 flags = reg_tbl[i].flags;
5181 if (is_5709 && (flags & BNX2_FL_NOT_5709))
5184 offset = (u32) reg_tbl[i].offset;
5185 rw_mask = reg_tbl[i].rw_mask;
5186 ro_mask = reg_tbl[i].ro_mask;
5188 save_val = readl(bp->regview + offset);
5190 writel(0, bp->regview + offset);
5192 val = readl(bp->regview + offset);
5193 if ((val & rw_mask) != 0) {
5197 if ((val & ro_mask) != (save_val & ro_mask)) {
5201 writel(0xffffffff, bp->regview + offset);
5203 val = readl(bp->regview + offset);
5204 if ((val & rw_mask) != rw_mask) {
5208 if ((val & ro_mask) != (save_val & ro_mask)) {
5212 writel(save_val, bp->regview + offset);
5216 writel(save_val, bp->regview + offset);
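/* Memory self test helper: write each test pattern to every word of
 * the given internal memory range through the indirect register
 * interface and verify the read-back value.
 */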
5224 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5226 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5227 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5230 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5233 for (offset = 0; offset < size; offset += 4) {
5235 bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5237 if (bnx2_reg_rd_ind(bp, start + offset) !=
5247 bnx2_test_memory(struct bnx2 *bp)
5251 static struct mem_entry {
5254 } mem_tbl_5706[] = {
5255 { 0x60000, 0x4000 },
5256 { 0xa0000, 0x3000 },
5257 { 0xe0000, 0x4000 },
5258 { 0x120000, 0x4000 },
5259 { 0x1a0000, 0x4000 },
5260 { 0x160000, 0x4000 },
5264 { 0x60000, 0x4000 },
5265 { 0xa0000, 0x3000 },
5266 { 0xe0000, 0x4000 },
5267 { 0x120000, 0x4000 },
5268 { 0x1a0000, 0x4000 },
5271 struct mem_entry *mem_tbl;
5273 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5274 mem_tbl = mem_tbl_5709;
5276 mem_tbl = mem_tbl_5706;
5278 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5279 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5280 mem_tbl[i].len)) != 0) {
5288 #define BNX2_MAC_LOOPBACK 0
5289 #define BNX2_PHY_LOOPBACK 1
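/* Loopback self test.  Build a frame addressed to the device's own MAC
 * address, send it with MAC or PHY loopback enabled, then check that
 * it arrives back on the RX ring with no error flags, the expected
 * length, and an intact payload.
 */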
5292 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5294 unsigned int pkt_size, num_pkts, i;
5295 struct sk_buff *skb, *rx_skb;
5296 unsigned char *packet;
5297 u16 rx_start_idx, rx_idx;
5300 struct sw_bd *rx_buf;
5301 struct l2_fhdr *rx_hdr;
5303 struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5304 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5305 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5309 txr = &tx_napi->tx_ring;
5310 rxr = &bnapi->rx_ring;
5311 if (loopback_mode == BNX2_MAC_LOOPBACK) {
5312 bp->loopback = MAC_LOOPBACK;
5313 bnx2_set_mac_loopback(bp);
5315 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5316 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5319 bp->loopback = PHY_LOOPBACK;
5320 bnx2_set_phy_loopback(bp);
5325 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5326 skb = netdev_alloc_skb(bp->dev, pkt_size);
5329 packet = skb_put(skb, pkt_size);
5330 memcpy(packet, bp->dev->dev_addr, 6);
5331 memset(packet + 6, 0x0, 8);
5332 for (i = 14; i < pkt_size; i++)
5333 packet[i] = (unsigned char) (i & 0xff);
5335 map = pci_map_single(bp->pdev, skb->data, pkt_size,
5338 REG_WR(bp, BNX2_HC_COMMAND,
5339 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5341 REG_RD(bp, BNX2_HC_COMMAND);
5344 rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5348 txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];
5350 txbd->tx_bd_haddr_hi = (u64) map >> 32;
5351 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5352 txbd->tx_bd_mss_nbytes = pkt_size;
5353 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5356 txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
5357 txr->tx_prod_bseq += pkt_size;
5359 REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5360 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5364 REG_WR(bp, BNX2_HC_COMMAND,
5365 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5367 REG_RD(bp, BNX2_HC_COMMAND);
5371 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
5374 if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
5375 goto loopback_test_done;
5377 rx_idx = bnx2_get_hw_rx_cons(bnapi);
5378 if (rx_idx != rx_start_idx + num_pkts) {
5379 goto loopback_test_done;
5382 rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5383 rx_skb = rx_buf->skb;
5385 rx_hdr = (struct l2_fhdr *) rx_skb->data;
5386 skb_reserve(rx_skb, BNX2_RX_OFFSET);
5388 pci_dma_sync_single_for_cpu(bp->pdev,
5389 pci_unmap_addr(rx_buf, mapping),
5390 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
5392 if (rx_hdr->l2_fhdr_status &
5393 (L2_FHDR_ERRORS_BAD_CRC |
5394 L2_FHDR_ERRORS_PHY_DECODE |
5395 L2_FHDR_ERRORS_ALIGNMENT |
5396 L2_FHDR_ERRORS_TOO_SHORT |
5397 L2_FHDR_ERRORS_GIANT_FRAME)) {
5399 goto loopback_test_done;
5402 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5403 goto loopback_test_done;
5406 for (i = 14; i < pkt_size; i++) {
5407 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
5408 goto loopback_test_done;
5419 #define BNX2_MAC_LOOPBACK_FAILED 1
5420 #define BNX2_PHY_LOOPBACK_FAILED 2
5421 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
5422 BNX2_PHY_LOOPBACK_FAILED)
5425 bnx2_test_loopback(struct bnx2 *bp)
5429 if (!netif_running(bp->dev))
5430 return BNX2_LOOPBACK_FAILED;
5432 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5433 spin_lock_bh(&bp->phy_lock);
5434 bnx2_init_phy(bp, 1);
5435 spin_unlock_bh(&bp->phy_lock);
5436 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5437 rc |= BNX2_MAC_LOOPBACK_FAILED;
5438 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5439 rc |= BNX2_PHY_LOOPBACK_FAILED;
5443 #define NVRAM_SIZE 0x200
5444 #define CRC32_RESIDUAL 0xdebb20e3
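/* NVRAM self test: verify the magic value in the first word, then read
 * 0x200 bytes at offset 0x100 and check that the CRC32 of each
 * 256-byte half leaves the expected residual.
 */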
5447 bnx2_test_nvram(struct bnx2 *bp)
5449 __be32 buf[NVRAM_SIZE / 4];
5450 u8 *data = (u8 *) buf;
5454 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5455 goto test_nvram_done;
5457 magic = be32_to_cpu(buf[0]);
5458 if (magic != 0x669955aa) {
5460 goto test_nvram_done;
5463 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5464 goto test_nvram_done;
5466 csum = ether_crc_le(0x100, data);
5467 if (csum != CRC32_RESIDUAL) {
5469 goto test_nvram_done;
5472 csum = ether_crc_le(0x100, data + 0x100);
5473 if (csum != CRC32_RESIDUAL) {
5482 bnx2_test_link(struct bnx2 *bp)
5486 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5491 spin_lock_bh(&bp->phy_lock);
5492 bnx2_enable_bmsr1(bp);
5493 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5494 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5495 bnx2_disable_bmsr1(bp);
5496 spin_unlock_bh(&bp->phy_lock);
5498 if (bmsr & BMSR_LSTATUS) {
5505 bnx2_test_intr(struct bnx2 *bp)
5510 if (!netif_running(bp->dev))
5513 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5515 /* This register is not touched during run-time. */
5516 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5517 REG_RD(bp, BNX2_HC_COMMAND);
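/* Poll for up to ~100 ms (10 x 10 ms) for the status block index to
 * change, which indicates the forced interrupt was delivered.
 */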
5519 for (i = 0; i < 10; i++) {
5520 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5526 msleep_interruptible(10);
5534 /* Determining link for parallel detection. */
5536 bnx2_5706_serdes_has_link(struct bnx2 *bp)
5538 u32 mode_ctl, an_dbg, exp;
5540 if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
5543 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
5544 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
5546 if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
5549 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5550 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5551 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5553 if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
5556 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
5557 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5558 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5560 if (exp & MII_EXPAND_REG1_RUDI_C) /* receiving CONFIG */
5567 bnx2_5706_serdes_timer(struct bnx2 *bp)
5571 spin_lock(&bp->phy_lock);
5572 if (bp->serdes_an_pending) {
5573 bp->serdes_an_pending--;
5575 } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5578 bp->current_interval = bp->timer_interval;
5580 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5582 if (bmcr & BMCR_ANENABLE) {
5583 if (bnx2_5706_serdes_has_link(bp)) {
5584 bmcr &= ~BMCR_ANENABLE;
5585 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5586 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5587 bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
5591 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
5592 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
5595 bnx2_write_phy(bp, 0x17, 0x0f01);
5596 bnx2_read_phy(bp, 0x15, &phy2);
5600 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5601 bmcr |= BMCR_ANENABLE;
5602 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5604 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
5607 bp->current_interval = bp->timer_interval;
5612 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5613 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
5614 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
5616 if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
5617 if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
5618 bnx2_5706s_force_link_dn(bp, 1);
5619 bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
5622 } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
5625 spin_unlock(&bp->phy_lock);
5629 bnx2_5708_serdes_timer(struct bnx2 *bp)
5631 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5634 if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
5635 bp->serdes_an_pending = 0;
5639 spin_lock(&bp->phy_lock);
5640 if (bp->serdes_an_pending)
5641 bp->serdes_an_pending--;
5642 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5645 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5646 if (bmcr & BMCR_ANENABLE) {
5647 bnx2_enable_forced_2g5(bp);
5648 bp->current_interval = SERDES_FORCED_TIMEOUT;
5650 bnx2_disable_forced_2g5(bp);
5651 bp->serdes_an_pending = 2;
5652 bp->current_interval = bp->timer_interval;
5656 bp->current_interval = bp->timer_interval;
5658 spin_unlock(&bp->phy_lock);
5662 bnx2_timer(unsigned long data)
5664 struct bnx2 *bp = (struct bnx2 *) data;
5666 if (!netif_running(bp->dev))
5669 if (atomic_read(&bp->intr_sem) != 0)
5670 goto bnx2_restart_timer;
5672 bnx2_send_heart_beat(bp);
5674 bp->stats_blk->stat_FwRxDrop =
5675 bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
5677 /* workaround occasional corrupted counters */
5678 if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
5679 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
5680 BNX2_HC_COMMAND_STATS_NOW);
5682 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
5683 if (CHIP_NUM(bp) == CHIP_NUM_5706)
5684 bnx2_5706_serdes_timer(bp);
5686 bnx2_5708_serdes_timer(bp);
5690 mod_timer(&bp->timer, jiffies + bp->current_interval);
5694 bnx2_request_irq(struct bnx2 *bp)
5696 unsigned long flags;
5697 struct bnx2_irq *irq;
5700 if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
5703 flags = IRQF_SHARED;
5705 for (i = 0; i < bp->irq_nvecs; i++) {
5706 irq = &bp->irq_tbl[i];
5707 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
5717 bnx2_free_irq(struct bnx2 *bp)
5719 struct bnx2_irq *irq;
5722 for (i = 0; i < bp->irq_nvecs; i++) {
5723 irq = &bp->irq_tbl[i];
5725 free_irq(irq->vector, &bp->bnx2_napi[i]);
5728 if (bp->flags & BNX2_FLAG_USING_MSI)
5729 pci_disable_msi(bp->pdev);
5730 else if (bp->flags & BNX2_FLAG_USING_MSIX)
5731 pci_disable_msix(bp->pdev);
5733 bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
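/* Set up MSI-X: map the MSI-X table and PBA into GRC windows, request
 * BNX2_MAX_MSIX_VEC vectors from the PCI core, and record the assigned
 * vectors in the driver's IRQ table.
 */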
5737 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
5740 struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
5742 bnx2_setup_msix_tbl(bp);
5743 REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
5744 REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
5745 REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
5747 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5748 msix_ent[i].entry = i;
5749 msix_ent[i].vector = 0;
5751 strcpy(bp->irq_tbl[i].name, bp->dev->name);
5752 bp->irq_tbl[i].handler = bnx2_msi_1shot;
5755 rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
5759 bp->irq_nvecs = msix_vecs;
5760 bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
5761 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
5762 bp->irq_tbl[i].vector = msix_ent[i].vector;
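/* Choose the interrupt mode: prefer MSI-X when the chip supports it
 * and more than one CPU is online, otherwise fall back to MSI and
 * finally to legacy INTx.  On the 5709, MSI runs in one-shot mode.
 */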
5766 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
5768 int cpus = num_online_cpus();
5769 int msix_vecs = min(cpus + 1, RX_MAX_RSS_RINGS);
5771 bp->irq_tbl[0].handler = bnx2_interrupt;
5772 strcpy(bp->irq_tbl[0].name, bp->dev->name);
5774 bp->irq_tbl[0].vector = bp->pdev->irq;
5776 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi && cpus > 1)
5777 bnx2_enable_msix(bp, msix_vecs);
5779 if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
5780 !(bp->flags & BNX2_FLAG_USING_MSIX)) {
5781 if (pci_enable_msi(bp->pdev) == 0) {
5782 bp->flags |= BNX2_FLAG_USING_MSI;
5783 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5784 bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
5785 bp->irq_tbl[0].handler = bnx2_msi_1shot;
5787 bp->irq_tbl[0].handler = bnx2_msi;
5789 bp->irq_tbl[0].vector = bp->pdev->irq;
5792 bp->num_tx_rings = 1;
5793 bp->num_rx_rings = bp->irq_nvecs;
5796 /* Called with rtnl_lock */
5798 bnx2_open(struct net_device *dev)
5800 struct bnx2 *bp = netdev_priv(dev);
5803 netif_carrier_off(dev);
5805 bnx2_set_power_state(bp, PCI_D0);
5806 bnx2_disable_int(bp);
5808 bnx2_setup_int_mode(bp, disable_msi);
5809 bnx2_napi_enable(bp);
5810 rc = bnx2_alloc_mem(bp);
5814 rc = bnx2_request_irq(bp);
5818 rc = bnx2_init_nic(bp, 1);
5822 mod_timer(&bp->timer, jiffies + bp->current_interval);
5824 atomic_set(&bp->intr_sem, 0);
5826 bnx2_enable_int(bp);
5828 if (bp->flags & BNX2_FLAG_USING_MSI) {
5829 /* Test MSI to make sure it is working
5830 * If MSI test fails, go back to INTx mode
5832 if (bnx2_test_intr(bp) != 0) {
5833 printk(KERN_WARNING PFX "%s: No interrupt was generated"
5834 " using MSI, switching to INTx mode. Please"
5835 " report this failure to the PCI maintainer"
5836 " and include system chipset information.\n",
5839 bnx2_disable_int(bp);
5842 bnx2_setup_int_mode(bp, 1);
5844 rc = bnx2_init_nic(bp, 0);
5847 rc = bnx2_request_irq(bp);
5850 del_timer_sync(&bp->timer);
5853 bnx2_enable_int(bp);
5856 if (bp->flags & BNX2_FLAG_USING_MSI)
5857 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
5858 else if (bp->flags & BNX2_FLAG_USING_MSIX)
5859 printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);
5861 netif_start_queue(dev);
5866 bnx2_napi_disable(bp);
5874 bnx2_reset_task(struct work_struct *work)
5876 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
5878 if (!netif_running(bp->dev))
5881 bnx2_netif_stop(bp);
5883 bnx2_init_nic(bp, 1);
5885 atomic_set(&bp->intr_sem, 1);
5886 bnx2_netif_start(bp);
5890 bnx2_tx_timeout(struct net_device *dev)
5892 struct bnx2 *bp = netdev_priv(dev);
5894 /* This allows the netif to be shutdown gracefully before resetting */
5895 schedule_work(&bp->reset_task);
5899 /* Called with rtnl_lock */
5901 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
5903 struct bnx2 *bp = netdev_priv(dev);
5905 bnx2_netif_stop(bp);
5908 bnx2_set_rx_mode(dev);
5909 if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
5910 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
5912 bnx2_netif_start(bp);
5916 /* Called with netif_tx_lock.
5917 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
5918 * netif_wake_queue().
5921 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
5923 struct bnx2 *bp = netdev_priv(dev);
5926 struct sw_bd *tx_buf;
5927 u32 len, vlan_tag_flags, last_frag, mss;
5928 u16 prod, ring_prod;
5930 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
5931 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5933 if (unlikely(bnx2_tx_avail(bp, txr) <
5934 (skb_shinfo(skb)->nr_frags + 1))) {
5935 netif_stop_queue(dev);
5936 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
5939 return NETDEV_TX_BUSY;
5941 len = skb_headlen(skb);
5942 prod = txr->tx_prod;
5943 ring_prod = TX_RING_IDX(prod);
5946 if (skb->ip_summed == CHECKSUM_PARTIAL) {
5947 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
5950 if (bp->vlgrp && vlan_tx_tag_present(skb)) {
5952 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
5954 if ((mss = skb_shinfo(skb)->gso_size)) {
5955 u32 tcp_opt_len, ip_tcp_len;
5958 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
5960 tcp_opt_len = tcp_optlen(skb);
5962 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
5963 u32 tcp_off = skb_transport_offset(skb) -
5964 sizeof(struct ipv6hdr) - ETH_HLEN;
5966 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
5967 TX_BD_FLAGS_SW_FLAGS;
5968 if (likely(tcp_off == 0))
5969 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
5972 vlan_tag_flags |= ((tcp_off & 0x3) <<
5973 TX_BD_FLAGS_TCP6_OFF0_SHL) |
5974 ((tcp_off & 0x10) <<
5975 TX_BD_FLAGS_TCP6_OFF4_SHL);
5976 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
5979 if (skb_header_cloned(skb) &&
5980 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5982 return NETDEV_TX_OK;
5985 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5989 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5990 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5994 if (tcp_opt_len || (iph->ihl > 5)) {
5995 vlan_tag_flags |= ((iph->ihl - 5) +
5996 (tcp_opt_len >> 2)) << 8;
6002 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6004 tx_buf = &txr->tx_buf_ring[ring_prod];
6006 pci_unmap_addr_set(tx_buf, mapping, mapping);
6008 txbd = &txr->tx_desc_ring[ring_prod];
6010 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6011 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6012 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6013 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
6015 last_frag = skb_shinfo(skb)->nr_frags;
6017 for (i = 0; i < last_frag; i++) {
6018 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6020 prod = NEXT_TX_BD(prod);
6021 ring_prod = TX_RING_IDX(prod);
6022 txbd = &txr->tx_desc_ring[ring_prod];
6025 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
6026 len, PCI_DMA_TODEVICE);
6027 pci_unmap_addr_set(&txr->tx_buf_ring[ring_prod],
6030 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6031 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6032 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6033 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
6036 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
6038 prod = NEXT_TX_BD(prod);
6039 txr->tx_prod_bseq += skb->len;
6041 REG_WR16(bp, txr->tx_bidx_addr, prod);
6042 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
6046 txr->tx_prod = prod;
6047 dev->trans_start = jiffies;
6049 if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
6050 netif_stop_queue(dev);
6051 if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
6052 netif_wake_queue(dev);
6055 return NETDEV_TX_OK;
6058 /* Called with rtnl_lock */
6060 bnx2_close(struct net_device *dev)
6062 struct bnx2 *bp = netdev_priv(dev);
6065 cancel_work_sync(&bp->reset_task);
6067 bnx2_disable_int_sync(bp);
6068 bnx2_napi_disable(bp);
6069 del_timer_sync(&bp->timer);
6070 if (bp->flags & BNX2_FLAG_NO_WOL)
6071 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
6073 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
6075 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
6076 bnx2_reset_chip(bp, reset_code);
6081 netif_carrier_off(bp->dev);
6082 bnx2_set_power_state(bp, PCI_D3hot);
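/* Hardware counters are kept as 32-bit hi/lo pairs.  GET_NET_STATS64
 * combines both halves on 64-bit hosts; 32-bit hosts use
 * GET_NET_STATS32 instead.
 */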
6086 #define GET_NET_STATS64(ctr) \
6087 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
6088 (unsigned long) (ctr##_lo)
6090 #define GET_NET_STATS32(ctr) \
6093 #if (BITS_PER_LONG == 64)
6094 #define GET_NET_STATS GET_NET_STATS64
6096 #define GET_NET_STATS GET_NET_STATS32
6099 static struct net_device_stats *
6100 bnx2_get_stats(struct net_device *dev)
6102 struct bnx2 *bp = netdev_priv(dev);
6103 struct statistics_block *stats_blk = bp->stats_blk;
6104 struct net_device_stats *net_stats = &bp->net_stats;
6106 if (bp->stats_blk == NULL) {
6109 net_stats->rx_packets =
6110 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
6111 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
6112 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
6114 net_stats->tx_packets =
6115 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
6116 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
6117 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
6119 net_stats->rx_bytes =
6120 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
6122 net_stats->tx_bytes =
6123 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
6125 net_stats->multicast =
6126 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
6128 net_stats->collisions =
6129 (unsigned long) stats_blk->stat_EtherStatsCollisions;
6131 net_stats->rx_length_errors =
6132 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
6133 stats_blk->stat_EtherStatsOverrsizePkts);
6135 net_stats->rx_over_errors =
6136 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
6138 net_stats->rx_frame_errors =
6139 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
6141 net_stats->rx_crc_errors =
6142 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
6144 net_stats->rx_errors = net_stats->rx_length_errors +
6145 net_stats->rx_over_errors + net_stats->rx_frame_errors +
6146 net_stats->rx_crc_errors;
6148 net_stats->tx_aborted_errors =
6149 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
6150 stats_blk->stat_Dot3StatsLateCollisions);
6152 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
6153 (CHIP_ID(bp) == CHIP_ID_5708_A0))
6154 net_stats->tx_carrier_errors = 0;
6156 net_stats->tx_carrier_errors =
6158 stats_blk->stat_Dot3StatsCarrierSenseErrors;
6161 net_stats->tx_errors =
6163 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
6165 net_stats->tx_aborted_errors +
6166 net_stats->tx_carrier_errors;
6168 net_stats->rx_missed_errors =
6169 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
6170 stats_blk->stat_FwRxDrop);
6175 /* All ethtool functions called with rtnl_lock */
6178 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6180 struct bnx2 *bp = netdev_priv(dev);
6181 int support_serdes = 0, support_copper = 0;
6183 cmd->supported = SUPPORTED_Autoneg;
6184 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6187 } else if (bp->phy_port == PORT_FIBRE)
6192 if (support_serdes) {
6193 cmd->supported |= SUPPORTED_1000baseT_Full |
6195 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6196 cmd->supported |= SUPPORTED_2500baseX_Full;
6199 if (support_copper) {
6200 cmd->supported |= SUPPORTED_10baseT_Half |
6201 SUPPORTED_10baseT_Full |
6202 SUPPORTED_100baseT_Half |
6203 SUPPORTED_100baseT_Full |
6204 SUPPORTED_1000baseT_Full |
6209 spin_lock_bh(&bp->phy_lock);
6210 cmd->port = bp->phy_port;
6211 cmd->advertising = bp->advertising;
6213 if (bp->autoneg & AUTONEG_SPEED) {
6214 cmd->autoneg = AUTONEG_ENABLE;
6217 cmd->autoneg = AUTONEG_DISABLE;
6220 if (netif_carrier_ok(dev)) {
6221 cmd->speed = bp->line_speed;
6222 cmd->duplex = bp->duplex;
6228 spin_unlock_bh(&bp->phy_lock);
6230 cmd->transceiver = XCVR_INTERNAL;
6231 cmd->phy_address = bp->phy_addr;
6237 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6239 struct bnx2 *bp = netdev_priv(dev);
6240 u8 autoneg = bp->autoneg;
6241 u8 req_duplex = bp->req_duplex;
6242 u16 req_line_speed = bp->req_line_speed;
6243 u32 advertising = bp->advertising;
6246 spin_lock_bh(&bp->phy_lock);
6248 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
6249 goto err_out_unlock;
6251 if (cmd->port != bp->phy_port &&
6252 !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
6253 goto err_out_unlock;
6255 /* If device is down, we can store the settings only if the user
6256 * is setting the currently active port.
6258 if (!netif_running(dev) && cmd->port != bp->phy_port)
6259 goto err_out_unlock;
6261 if (cmd->autoneg == AUTONEG_ENABLE) {
6262 autoneg |= AUTONEG_SPEED;
6264 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
6266 /* allow advertising 1 speed */
6267 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
6268 (cmd->advertising == ADVERTISED_10baseT_Full) ||
6269 (cmd->advertising == ADVERTISED_100baseT_Half) ||
6270 (cmd->advertising == ADVERTISED_100baseT_Full)) {
6272 if (cmd->port == PORT_FIBRE)
6273 goto err_out_unlock;
6275 advertising = cmd->advertising;
6277 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
6278 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ||
6279 (cmd->port == PORT_TP))
6280 goto err_out_unlock;
6281 } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
6282 advertising = cmd->advertising;
6283 else if (cmd->advertising == ADVERTISED_1000baseT_Half)
6284 goto err_out_unlock;
6286 if (cmd->port == PORT_FIBRE)
6287 advertising = ETHTOOL_ALL_FIBRE_SPEED;
6289 advertising = ETHTOOL_ALL_COPPER_SPEED;
6291 advertising |= ADVERTISED_Autoneg;
6294 if (cmd->port == PORT_FIBRE) {
6295 if ((cmd->speed != SPEED_1000 &&
6296 cmd->speed != SPEED_2500) ||
6297 (cmd->duplex != DUPLEX_FULL))
6298 goto err_out_unlock;
6300 if (cmd->speed == SPEED_2500 &&
6301 !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
6302 goto err_out_unlock;
6304 else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
6305 goto err_out_unlock;
6307 autoneg &= ~AUTONEG_SPEED;
6308 req_line_speed = cmd->speed;
6309 req_duplex = cmd->duplex;
6313 bp->autoneg = autoneg;
6314 bp->advertising = advertising;
6315 bp->req_line_speed = req_line_speed;
6316 bp->req_duplex = req_duplex;
6319 /* If device is down, the new settings will be picked up when it is
6322 if (netif_running(dev))
6323 err = bnx2_setup_phy(bp, cmd->port);
6326 spin_unlock_bh(&bp->phy_lock);
6332 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6334 struct bnx2 *bp = netdev_priv(dev);
6336 strcpy(info->driver, DRV_MODULE_NAME);
6337 strcpy(info->version, DRV_MODULE_VERSION);
6338 strcpy(info->bus_info, pci_name(bp->pdev));
6339 strcpy(info->fw_version, bp->fw_version);
6342 #define BNX2_REGDUMP_LEN (32 * 1024)
6345 bnx2_get_regs_len(struct net_device *dev)
6347 return BNX2_REGDUMP_LEN;
6351 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6353 u32 *p = _p, i, offset;
6355 struct bnx2 *bp = netdev_priv(dev);
6356 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6357 0x0800, 0x0880, 0x0c00, 0x0c10,
6358 0x0c30, 0x0d08, 0x1000, 0x101c,
6359 0x1040, 0x1048, 0x1080, 0x10a4,
6360 0x1400, 0x1490, 0x1498, 0x14f0,
6361 0x1500, 0x155c, 0x1580, 0x15dc,
6362 0x1600, 0x1658, 0x1680, 0x16d8,
6363 0x1800, 0x1820, 0x1840, 0x1854,
6364 0x1880, 0x1894, 0x1900, 0x1984,
6365 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6366 0x1c80, 0x1c94, 0x1d00, 0x1d84,
6367 0x2000, 0x2030, 0x23c0, 0x2400,
6368 0x2800, 0x2820, 0x2830, 0x2850,
6369 0x2b40, 0x2c10, 0x2fc0, 0x3058,
6370 0x3c00, 0x3c94, 0x4000, 0x4010,
6371 0x4080, 0x4090, 0x43c0, 0x4458,
6372 0x4c00, 0x4c18, 0x4c40, 0x4c54,
6373 0x4fc0, 0x5010, 0x53c0, 0x5444,
6374 0x5c00, 0x5c18, 0x5c80, 0x5c90,
6375 0x5fc0, 0x6000, 0x6400, 0x6428,
6376 0x6800, 0x6848, 0x684c, 0x6860,
6377 0x6888, 0x6910, 0x8000 };
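/* Dump only the register ranges between consecutive boundary pairs
 * above; everything else in the output buffer is left zero-filled.
 */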
6381 memset(p, 0, BNX2_REGDUMP_LEN);
6383 if (!netif_running(bp->dev))
6387 offset = reg_boundaries[0];
6389 while (offset < BNX2_REGDUMP_LEN) {
6390 *p++ = REG_RD(bp, offset);
6392 if (offset == reg_boundaries[i + 1]) {
6393 offset = reg_boundaries[i + 2];
6394 p = (u32 *) (orig_p + offset);
6401 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6403 struct bnx2 *bp = netdev_priv(dev);
6405 if (bp->flags & BNX2_FLAG_NO_WOL) {
6410 wol->supported = WAKE_MAGIC;
6412 wol->wolopts = WAKE_MAGIC;
6416 memset(&wol->sopass, 0, sizeof(wol->sopass));
6420 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6422 struct bnx2 *bp = netdev_priv(dev);
6424 if (wol->wolopts & ~WAKE_MAGIC)
6427 if (wol->wolopts & WAKE_MAGIC) {
6428 if (bp->flags & BNX2_FLAG_NO_WOL)
6440 bnx2_nway_reset(struct net_device *dev)
6442 struct bnx2 *bp = netdev_priv(dev);
6445 if (!(bp->autoneg & AUTONEG_SPEED)) {
6449 spin_lock_bh(&bp->phy_lock);
6451 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6454 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
6455 spin_unlock_bh(&bp->phy_lock);
6459 /* Force a link down visible on the other side */
6460 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6461 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
6462 spin_unlock_bh(&bp->phy_lock);
6466 spin_lock_bh(&bp->phy_lock);
6468 bp->current_interval = SERDES_AN_TIMEOUT;
6469 bp->serdes_an_pending = 1;
6470 mod_timer(&bp->timer, jiffies + bp->current_interval);
6473 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6474 bmcr &= ~BMCR_LOOPBACK;
6475 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
6477 spin_unlock_bh(&bp->phy_lock);
6483 bnx2_get_eeprom_len(struct net_device *dev)
6485 struct bnx2 *bp = netdev_priv(dev);
6487 if (bp->flash_info == NULL)
6490 return (int) bp->flash_size;
6494 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6497 struct bnx2 *bp = netdev_priv(dev);
6500 /* parameters already validated in ethtool_get_eeprom */
6502 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6508 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6511 struct bnx2 *bp = netdev_priv(dev);
6514 /* parameters already validated in ethtool_set_eeprom */
6516 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6522 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6524 struct bnx2 *bp = netdev_priv(dev);
6526 memset(coal, 0, sizeof(struct ethtool_coalesce));
6528 coal->rx_coalesce_usecs = bp->rx_ticks;
6529 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
6530 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
6531 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
6533 coal->tx_coalesce_usecs = bp->tx_ticks;
6534 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
6535 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
6536 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
6538 coal->stats_block_coalesce_usecs = bp->stats_ticks;
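/* Apply new coalescing parameters: tick values are clamped to 0x3ff
 * microseconds and frame-count triggers to 255, then the chip is
 * reinitialized if the interface is running so the new values take
 * effect.
 */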
6544 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6546 struct bnx2 *bp = netdev_priv(dev);
6548 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
6549 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
6551 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
6552 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
6554 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
6555 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
6557 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
6558 if (bp->rx_quick_cons_trip_int > 0xff)
6559 bp->rx_quick_cons_trip_int = 0xff;
6561 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
6562 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
6564 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
6565 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
6567 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
6568 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
6570 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
6571 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
6574 bp->stats_ticks = coal->stats_block_coalesce_usecs;
6575 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
6576 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
6577 bp->stats_ticks = USEC_PER_SEC;
6579 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
6580 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6581 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6583 if (netif_running(bp->dev)) {
6584 bnx2_netif_stop(bp);
6585 bnx2_init_nic(bp, 0);
6586 bnx2_netif_start(bp);
6593 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6595 struct bnx2 *bp = netdev_priv(dev);
6597 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
6598 ering->rx_mini_max_pending = 0;
6599 ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
6601 ering->rx_pending = bp->rx_ring_size;
6602 ering->rx_mini_pending = 0;
6603 ering->rx_jumbo_pending = bp->rx_pg_ring_size;
6605 ering->tx_max_pending = MAX_TX_DESC_CNT;
6606 ering->tx_pending = bp->tx_ring_size;
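/* Changing ring sizes requires a full reinitialization: stop the
 * interface and reset the chip, set the new sizes, then reallocate
 * memory and bring the NIC back up.
 */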
6610 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
6612 if (netif_running(bp->dev)) {
6613 bnx2_netif_stop(bp);
6614 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6619 bnx2_set_rx_ring_size(bp, rx);
6620 bp->tx_ring_size = tx;
6622 if (netif_running(bp->dev)) {
6625 rc = bnx2_alloc_mem(bp);
6628 bnx2_init_nic(bp, 0);
6629 bnx2_netif_start(bp);
6635 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6637 struct bnx2 *bp = netdev_priv(dev);
6640 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
6641 (ering->tx_pending > MAX_TX_DESC_CNT) ||
6642 (ering->tx_pending <= MAX_SKB_FRAGS)) {
6646 rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
6651 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6653 struct bnx2 *bp = netdev_priv(dev);
6655 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
6656 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
6657 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
6661 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6663 struct bnx2 *bp = netdev_priv(dev);
6665 bp->req_flow_ctrl = 0;
6666 if (epause->rx_pause)
6667 bp->req_flow_ctrl |= FLOW_CTRL_RX;
6668 if (epause->tx_pause)
6669 bp->req_flow_ctrl |= FLOW_CTRL_TX;
6671 if (epause->autoneg) {
6672 bp->autoneg |= AUTONEG_FLOW_CTRL;
6675 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
6678 spin_lock_bh(&bp->phy_lock);
6680 bnx2_setup_phy(bp, bp->phy_port);
6682 spin_unlock_bh(&bp->phy_lock);
6688 bnx2_get_rx_csum(struct net_device *dev)
6690 struct bnx2 *bp = netdev_priv(dev);
6696 bnx2_set_rx_csum(struct net_device *dev, u32 data)
6698 struct bnx2 *bp = netdev_priv(dev);
6705 bnx2_set_tso(struct net_device *dev, u32 data)
6707 struct bnx2 *bp = netdev_priv(dev);
6710 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6711 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6712 dev->features |= NETIF_F_TSO6;
6714 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
6719 #define BNX2_NUM_STATS 46
6722 char string[ETH_GSTRING_LEN];
6723 } bnx2_stats_str_arr[BNX2_NUM_STATS] = {
6725 { "rx_error_bytes" },
6727 { "tx_error_bytes" },
6728 { "rx_ucast_packets" },
6729 { "rx_mcast_packets" },
6730 { "rx_bcast_packets" },
6731 { "tx_ucast_packets" },
6732 { "tx_mcast_packets" },
6733 { "tx_bcast_packets" },
6734 { "tx_mac_errors" },
6735 { "tx_carrier_errors" },
6736 { "rx_crc_errors" },
6737 { "rx_align_errors" },
6738 { "tx_single_collisions" },
6739 { "tx_multi_collisions" },
6741 { "tx_excess_collisions" },
6742 { "tx_late_collisions" },
6743 { "tx_total_collisions" },
6746 { "rx_undersize_packets" },
6747 { "rx_oversize_packets" },
6748 { "rx_64_byte_packets" },
6749 { "rx_65_to_127_byte_packets" },
6750 { "rx_128_to_255_byte_packets" },
6751 { "rx_256_to_511_byte_packets" },
6752 { "rx_512_to_1023_byte_packets" },
6753 { "rx_1024_to_1522_byte_packets" },
6754 { "rx_1523_to_9022_byte_packets" },
6755 { "tx_64_byte_packets" },
6756 { "tx_65_to_127_byte_packets" },
6757 { "tx_128_to_255_byte_packets" },
6758 { "tx_256_to_511_byte_packets" },
6759 { "tx_512_to_1023_byte_packets" },
6760 { "tx_1024_to_1522_byte_packets" },
6761 { "tx_1523_to_9022_byte_packets" },
6762 { "rx_xon_frames" },
6763 { "rx_xoff_frames" },
6764 { "tx_xon_frames" },
6765 { "tx_xoff_frames" },
6766 { "rx_mac_ctrl_frames" },
6767 { "rx_filtered_packets" },
6769 { "rx_fw_discards" },
6772 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
6774 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
6775 STATS_OFFSET32(stat_IfHCInOctets_hi),
6776 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
6777 STATS_OFFSET32(stat_IfHCOutOctets_hi),
6778 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
6779 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
6780 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
6781 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
6782 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
6783 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
6784 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
6785 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
6786 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
6787 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
6788 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
6789 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
6790 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
6791 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
6792 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
6793 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
6794 STATS_OFFSET32(stat_EtherStatsCollisions),
6795 STATS_OFFSET32(stat_EtherStatsFragments),
6796 STATS_OFFSET32(stat_EtherStatsJabbers),
6797 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
6798 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
6799 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
6800 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
6801 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
6802 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
6803 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
6804 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
6805 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
6806 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
6807 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
6808 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
6809 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
6810 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
6811 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
6812 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
6813 STATS_OFFSET32(stat_XonPauseFramesReceived),
6814 STATS_OFFSET32(stat_XoffPauseFramesReceived),
6815 STATS_OFFSET32(stat_OutXonSent),
6816 STATS_OFFSET32(stat_OutXoffSent),
6817 STATS_OFFSET32(stat_MacControlFramesReceived),
6818 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
6819 STATS_OFFSET32(stat_IfInMBUFDiscards),
6820 STATS_OFFSET32(stat_FwRxDrop),
6823 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
6824 * skipped because of errata.
6826 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
6827 8,0,8,8,8,8,8,8,8,8,
6828 4,0,4,4,4,4,4,4,4,4,
6829 4,4,4,4,4,4,4,4,4,4,
6830 4,4,4,4,4,4,4,4,4,4,
6834 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
6835 8,0,8,8,8,8,8,8,8,8,
6836 4,4,4,4,4,4,4,4,4,4,
6837 4,4,4,4,4,4,4,4,4,4,
6838 4,4,4,4,4,4,4,4,4,4,
6842 #define BNX2_NUM_TESTS 6
6845 char string[ETH_GSTRING_LEN];
6846 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
6847 { "register_test (offline)" },
6848 { "memory_test (offline)" },
6849 { "loopback_test (offline)" },
6850 { "nvram_test (online)" },
6851 { "interrupt_test (online)" },
6852 { "link_test (online)" },
6856 bnx2_get_sset_count(struct net_device *dev, int sset)
6860 return BNX2_NUM_TESTS;
6862 return BNX2_NUM_STATS;
6869 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
6871 struct bnx2 *bp = netdev_priv(dev);
6873 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
6874 if (etest->flags & ETH_TEST_FL_OFFLINE) {
6877 bnx2_netif_stop(bp);
6878 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
6881 if (bnx2_test_registers(bp) != 0) {
6883 etest->flags |= ETH_TEST_FL_FAILED;
6885 if (bnx2_test_memory(bp) != 0) {
6887 etest->flags |= ETH_TEST_FL_FAILED;
6889 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
6890 etest->flags |= ETH_TEST_FL_FAILED;
6892 if (!netif_running(bp->dev)) {
6893 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6896 bnx2_init_nic(bp, 1);
6897 bnx2_netif_start(bp);
6900 /* wait for link up */
6901 for (i = 0; i < 7; i++) {
6904 msleep_interruptible(1000);
6908 if (bnx2_test_nvram(bp) != 0) {
6910 etest->flags |= ETH_TEST_FL_FAILED;
6912 if (bnx2_test_intr(bp) != 0) {
6914 etest->flags |= ETH_TEST_FL_FAILED;
6917 if (bnx2_test_link(bp) != 0) {
6919 etest->flags |= ETH_TEST_FL_FAILED;
6925 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6927 switch (stringset) {
6929 memcpy(buf, bnx2_stats_str_arr,
6930 sizeof(bnx2_stats_str_arr));
6933 memcpy(buf, bnx2_tests_str_arr,
6934 sizeof(bnx2_tests_str_arr));
6940 bnx2_get_ethtool_stats(struct net_device *dev,
6941 struct ethtool_stats *stats, u64 *buf)
6943 struct bnx2 *bp = netdev_priv(dev);
6945 u32 *hw_stats = (u32 *) bp->stats_blk;
6946 u8 *stats_len_arr = NULL;
6948 if (hw_stats == NULL) {
6949 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
6953 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
6954 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
6955 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
6956 (CHIP_ID(bp) == CHIP_ID_5708_A0))
6957 stats_len_arr = bnx2_5706_stats_len_arr;
6959 stats_len_arr = bnx2_5708_stats_len_arr;
6961 for (i = 0; i < BNX2_NUM_STATS; i++) {
6962 if (stats_len_arr[i] == 0) {
6963 /* skip this counter */
6967 if (stats_len_arr[i] == 4) {
6968 /* 4-byte counter */
6970 *(hw_stats + bnx2_stats_offset_arr[i]);
6973 /* 8-byte counter */
6974 buf[i] = (((u64) *(hw_stats +
6975 bnx2_stats_offset_arr[i])) << 32) +
6976 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
6981 bnx2_phys_id(struct net_device *dev, u32 data)
6983 struct bnx2 *bp = netdev_priv(dev);
6990 save = REG_RD(bp, BNX2_MISC_CFG);
6991 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
6993 for (i = 0; i < (data * 2); i++) {
6995 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
6998 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
6999 BNX2_EMAC_LED_1000MB_OVERRIDE |
7000 BNX2_EMAC_LED_100MB_OVERRIDE |
7001 BNX2_EMAC_LED_10MB_OVERRIDE |
7002 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7003 BNX2_EMAC_LED_TRAFFIC);
7005 msleep_interruptible(500);
7006 if (signal_pending(current))
7009 REG_WR(bp, BNX2_EMAC_LED, 0);
7010 REG_WR(bp, BNX2_MISC_CFG, save);
7015 bnx2_set_tx_csum(struct net_device *dev, u32 data)
7017 struct bnx2 *bp = netdev_priv(dev);
7019 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7020 return (ethtool_op_set_tx_ipv6_csum(dev, data));
7022 return (ethtool_op_set_tx_csum(dev, data));
7025 static const struct ethtool_ops bnx2_ethtool_ops = {
7026 .get_settings = bnx2_get_settings,
7027 .set_settings = bnx2_set_settings,
7028 .get_drvinfo = bnx2_get_drvinfo,
7029 .get_regs_len = bnx2_get_regs_len,
7030 .get_regs = bnx2_get_regs,
7031 .get_wol = bnx2_get_wol,
7032 .set_wol = bnx2_set_wol,
7033 .nway_reset = bnx2_nway_reset,
7034 .get_link = ethtool_op_get_link,
7035 .get_eeprom_len = bnx2_get_eeprom_len,
7036 .get_eeprom = bnx2_get_eeprom,
7037 .set_eeprom = bnx2_set_eeprom,
7038 .get_coalesce = bnx2_get_coalesce,
7039 .set_coalesce = bnx2_set_coalesce,
7040 .get_ringparam = bnx2_get_ringparam,
7041 .set_ringparam = bnx2_set_ringparam,
7042 .get_pauseparam = bnx2_get_pauseparam,
7043 .set_pauseparam = bnx2_set_pauseparam,
7044 .get_rx_csum = bnx2_get_rx_csum,
7045 .set_rx_csum = bnx2_set_rx_csum,
7046 .set_tx_csum = bnx2_set_tx_csum,
7047 .set_sg = ethtool_op_set_sg,
7048 .set_tso = bnx2_set_tso,
7049 .self_test = bnx2_self_test,
7050 .get_strings = bnx2_get_strings,
7051 .phys_id = bnx2_phys_id,
7052 .get_ethtool_stats = bnx2_get_ethtool_stats,
7053 .get_sset_count = bnx2_get_sset_count,
7056 /* Called with rtnl_lock */
7058 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7060 struct mii_ioctl_data *data = if_mii(ifr);
7061 struct bnx2 *bp = netdev_priv(dev);
7066 data->phy_id = bp->phy_addr;
7072 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7075 if (!netif_running(dev))
7078 spin_lock_bh(&bp->phy_lock);
7079 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
7080 spin_unlock_bh(&bp->phy_lock);
7082 data->val_out = mii_regval;
7088 if (!capable(CAP_NET_ADMIN))
7091 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7094 if (!netif_running(dev))
7097 spin_lock_bh(&bp->phy_lock);
7098 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
7099 spin_unlock_bh(&bp->phy_lock);
7110 /* Called with rtnl_lock */
7112 bnx2_change_mac_addr(struct net_device *dev, void *p)
7114 struct sockaddr *addr = p;
7115 struct bnx2 *bp = netdev_priv(dev);
7117 if (!is_valid_ether_addr(addr->sa_data))
7120 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7121 if (netif_running(dev))
7122 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7127 /* Called with rtnl_lock */
7129 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7131 struct bnx2 *bp = netdev_priv(dev);
7133 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7134 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7138 return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
7141 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
7143 poll_bnx2(struct net_device *dev)
7145 struct bnx2 *bp = netdev_priv(dev);
7147 disable_irq(bp->pdev->irq);
7148 bnx2_interrupt(bp->pdev->irq, dev);
7149 enable_irq(bp->pdev->irq);
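/* The 5709 is a dual-media chip: decide whether this PCI function is
 * wired to copper or SerDes from the bond ID and the strap (or strap
 * override) bits in the dual media control register.
 */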
7153 static void __devinit
7154 bnx2_get_5709_media(struct bnx2 *bp)
7156 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7157 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7160 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7162 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7163 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7167 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7168 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7170 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7172 if (PCI_FUNC(bp->pdev->devfn) == 0) {
7177 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7185 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7191 static void __devinit
7192 bnx2_get_pci_speed(struct bnx2 *bp)
7196 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
7197 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7200 bp->flags |= BNX2_FLAG_PCIX;
7202 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7204 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
7206 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
7207 bp->bus_speed_mhz = 133;
7210 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
7211 bp->bus_speed_mhz = 100;
7214 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
7215 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
7216 bp->bus_speed_mhz = 66;
7219 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
7220 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
7221 bp->bus_speed_mhz = 50;
7224 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
7225 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
7226 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
7227 bp->bus_speed_mhz = 33;
7232 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
7233 bp->bus_speed_mhz = 66;
7235 bp->bus_speed_mhz = 33;
7238 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
7239 bp->flags |= BNX2_FLAG_PCI_32BIT;
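/* One-time board setup at probe: enable and map the device, determine
 * the chip's DMA, PCI and PHY capabilities, read the permanent MAC
 * address and firmware version from shared memory, and set default
 * ring and coalescing parameters.
 */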
7243 static int __devinit
7244 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7247 unsigned long mem_len;
7250 u64 dma_mask, persist_dma_mask;
7252 SET_NETDEV_DEV(dev, &pdev->dev);
7253 bp = netdev_priv(dev);
7258 /* enable device (incl. PCI PM wakeup), and bus-mastering */
7259 rc = pci_enable_device(pdev);
7261 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
7265 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7267 "Cannot find PCI device base address, aborting.\n");
7269 goto err_out_disable;
7272 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7274 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
7275 goto err_out_disable;
7278 pci_set_master(pdev);
7279 pci_save_state(pdev);
7281 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7282 if (bp->pm_cap == 0) {
7284 "Cannot find power management capability, aborting.\n");
7286 goto err_out_release;
7292 spin_lock_init(&bp->phy_lock);
7293 spin_lock_init(&bp->indirect_lock);
7294 INIT_WORK(&bp->reset_task, bnx2_reset_task);
7296 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7297 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
7298 dev->mem_end = dev->mem_start + mem_len;
7299 dev->irq = pdev->irq;
7301 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7304 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
7306 goto err_out_release;
7309 /* Configure byte swap and enable write to the reg_window registers.
7310 * Rely on CPU to do target byte swapping on big endian systems
7311 * The chip's target access swapping will not swap all accesses
7313 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7314 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7315 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7317 bnx2_set_power_state(bp, PCI_D0);
7319 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7321 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7322 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7324 "Cannot find PCIE capability, aborting.\n");
7328 bp->flags |= BNX2_FLAG_PCIE;
7329 if (CHIP_REV(bp) == CHIP_REV_Ax)
7330 bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7332 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7333 if (bp->pcix_cap == 0) {
7335 "Cannot find PCIX capability, aborting.\n");
7341 if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7342 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7343 bp->flags |= BNX2_FLAG_MSIX_CAP;
7346 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7347 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
7348 bp->flags |= BNX2_FLAG_MSI_CAP;
7351 /* 5708 cannot support DMA addresses > 40-bit. */
7352 if (CHIP_NUM(bp) == CHIP_NUM_5708)
7353 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
7355 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
7357 /* Configure DMA attributes. */
7358 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7359 dev->features |= NETIF_F_HIGHDMA;
7360 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7363 "pci_set_consistent_dma_mask failed, aborting.\n");
7366 } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
7367 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
7371 if (!(bp->flags & BNX2_FLAG_PCIE))
7372 bnx2_get_pci_speed(bp);
7374 /* 5706A0 may falsely detect SERR and PERR. */
7375 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7376 reg = REG_RD(bp, PCI_COMMAND);
7377 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7378 REG_WR(bp, PCI_COMMAND, reg);
7380 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
7381 !(bp->flags & BNX2_FLAG_PCIX)) {
7384 "5706 A1 can only be used in a PCIX bus, aborting.\n");
7388 bnx2_init_nvram(bp);
7390 reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
7392 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
7393 BNX2_SHM_HDR_SIGNATURE_SIG) {
7394 u32 off = PCI_FUNC(pdev->devfn) << 2;
7396 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
7398 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
7400 /* Get the permanent MAC address. First we need to make sure the
7401 * firmware is actually running.
7403 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
7405 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
7406 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
7407 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
7412 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
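/* Convert the packed bootcode revision (one byte per field) into a
 * dotted decimal string, suppressing leading zeros within each field.
 */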
7413 for (i = 0, j = 0; i < 3; i++) {
7416 num = (u8) (reg >> (24 - (i * 8)));
7417 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
7418 if (num >= k || !skip0 || k == 1) {
7419 bp->fw_version[j++] = (num / k) + '0';
7424 bp->fw_version[j++] = '.';
7426 reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
7427 if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
7430 if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
7431 bp->flags |= BNX2_FLAG_ASF_ENABLE;
7433 for (i = 0; i < 30; i++) {
7434 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7435 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
7440 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7441 reg &= BNX2_CONDITION_MFW_RUN_MASK;
7442 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
7443 reg != BNX2_CONDITION_MFW_RUN_NONE) {
7445 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
7447 bp->fw_version[j++] = ' ';
7448 for (i = 0; i < 3; i++) {
7449 reg = bnx2_reg_rd_ind(bp, addr + i * 4);
7451 memcpy(&bp->fw_version[j], ®, 4);
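
	/* The permanent MAC address is kept in shared memory as two words:
	 * the upper word holds bytes 0-1 and the lower word bytes 2-5.
	 */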
	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
	bp->mac_addr[0] = (u8) (reg >> 8);
	bp->mac_addr[1] = (u8) reg;

	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
	bp->mac_addr[2] = (u8) (reg >> 24);
	bp->mac_addr[3] = (u8) (reg >> 16);
	bp->mac_addr[4] = (u8) (reg >> 8);
	bp->mac_addr[5] = (u8) reg;
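
	/* Default ring sizes and host interrupt-coalescing parameters. */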
	bp->tx_ring_size = MAX_TX_DESC_CNT;
	bnx2_set_rx_ring_size(bp, 255);

	bp->tx_quick_cons_trip_int = 20;
	bp->tx_quick_cons_trip = 20;
	bp->tx_ticks_int = 80;
	bp->tx_ticks = 80;

	bp->rx_quick_cons_trip_int = 6;
	bp->rx_quick_cons_trip = 6;
	bp->rx_ticks_int = 18;
	bp->rx_ticks = 18;

	bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;

	bp->timer_interval = HZ;
	bp->current_interval = HZ;
	/* Disable WOL support if we are running on a SERDES chip. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_get_5709_media(bp);
	else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;

	bp->phy_port = PORT_TP;
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bp->phy_port = PORT_FIBRE;
		reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
			bp->flags |= BNX2_FLAG_NO_WOL;
			bp->wol = 0;
		}
		if (CHIP_NUM(bp) == CHIP_NUM_5706) {
			/* Don't do parallel detect on this board because of
			 * some board problems.  The link will not go down
			 * if we do parallel detect.
			 */
			if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
			    pdev->subsystem_device == 0x310c)
				bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
		} else {
			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
				bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
		}
	} else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
		   CHIP_NUM(bp) == CHIP_NUM_5708)
		bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
	else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
		 (CHIP_REV(bp) == CHIP_REV_Ax ||
		  CHIP_REV(bp) == CHIP_REV_Bx))
		bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
	bnx2_init_fw_cap(bp);

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		bp->flags |= BNX2_FLAG_NO_WOL;
		bp->wol = 0;
	}

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		bp->tx_quick_cons_trip_int =
			bp->tx_quick_cons_trip;
		bp->tx_ticks_int = bp->tx_ticks;
		bp->rx_quick_cons_trip_int =
			bp->rx_quick_cons_trip;
		bp->rx_ticks_int = bp->rx_ticks;
		bp->comp_prod_trip_int = bp->comp_prod_trip;
		bp->com_ticks_int = bp->com_ticks;
		bp->cmd_ticks_int = bp->cmd_ticks;
	}
	/* Disable MSI on 5706 if AMD 8132 bridge is found.
	 *
	 * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
	 * with byte enables disabled on the unused 32-bit word.  This is legal
	 * but causes problems on the AMD 8132 which will eventually stop
	 * responding after a while.
	 *
	 * AMD believes this incompatibility is unique to the 5706, and
	 * prefers to locally disable MSI rather than globally disabling it.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
		struct pci_dev *amd_8132 = NULL;

		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
						  PCI_DEVICE_ID_AMD_8132_BRIDGE,
						  amd_8132))) {

			if (amd_8132->revision >= 0x10 &&
			    amd_8132->revision <= 0x13) {
				disable_msi = 1;
				pci_dev_put(amd_8132);
				break;
			}
		}
	}
	bnx2_set_default_link(bp);
	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
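
	/* Set up the periodic driver timer.  It is only initialized here;
	 * it is started later, once the interface is brought up.
	 */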
	init_timer(&bp->timer);
	bp->timer.expires = RUN_AT(bp->timer_interval);
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2_timer;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}

err_out_release:
	pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
static char * __devinit
bnx2_bus_string(struct bnx2 *bp, char *str)
{
	char *s = str;

	if (bp->flags & BNX2_FLAG_PCIE) {
		s += sprintf(s, "PCI Express");
	} else {
		s += sprintf(s, "PCI");
		if (bp->flags & BNX2_FLAG_PCIX)
			s += sprintf(s, "-X");
		if (bp->flags & BNX2_FLAG_PCI_32BIT)
			s += sprintf(s, " 32-bit");
		else
			s += sprintf(s, " 64-bit");
		s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
	}
	return str;
}
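
/* Register one NAPI poll handler per possible MSI-X vector (weight 64). */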
static void __devinit
bnx2_init_napi(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		int (*poll)(struct napi_struct *, int);

		if (i == 0)
			poll = bnx2_poll;
		else
			poll = bnx2_poll_msix;

		netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
		bnapi->bp = bp;
	}
}
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc;
	char str[40];
	DECLARE_MAC_BUF(mac);

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev(sizeof(*bp));
	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	dev->open = bnx2_open;
	dev->hard_start_xmit = bnx2_start_xmit;
	dev->stop = bnx2_close;
	dev->get_stats = bnx2_get_stats;
	dev->set_rx_mode = bnx2_set_rx_mode;
	dev->do_ioctl = bnx2_ioctl;
	dev->set_mac_address = bnx2_change_mac_addr;
	dev->change_mtu = bnx2_change_mtu;
	dev->tx_timeout = bnx2_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->vlan_rx_register = bnx2_vlan_rx_register;
	dev->ethtool_ops = &bnx2_ethtool_ops;

	bp = netdev_priv(dev);
	bnx2_init_napi(bp);

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2;
#endif

	pci_set_drvdata(pdev, dev);

	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);
	bp->name = board_info[ent->driver_data].name;

	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_IPV6_CSUM;

	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_TSO6;

	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		if (bp->regview)
			iounmap(bp->regview);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}

	printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
		"IRQ %d, node addr %s\n",
		dev->name,
		bp->name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		((CHIP_ID(bp) & 0x0ff0) >> 4),
		bnx2_bus_string(bp, str),
		dev->base_addr,
		bp->pdev->irq, print_mac(mac, dev->dev_addr));

	return 0;
}
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
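
/* PCI power-management entry points: save config state, quiesce the NIC
 * (optionally arming wake-on-LAN) on suspend, and reinitialize on resume.
 */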
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	if (bp->flags & BNX2_FLAG_NO_WOL)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	pci_restore_state(pdev);
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp, 1);
	bnx2_netif_start(bp);
	return 0;
}
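
/* PCI error-recovery callbacks: detach and reset the NIC when a bus error
 * is reported, then bring it back up once the slot has been reset.
 */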
/**
 * bnx2_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	netif_device_detach(dev);

	if (netif_running(dev)) {
		bnx2_netif_stop(bp);
		del_timer_sync(&bp->timer);
		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	}

	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * bnx2_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev)) {
		bnx2_set_power_state(bp, PCI_D0);
		bnx2_init_nic(bp, 1);
	}

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * bnx2_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	if (netif_running(dev))
		bnx2_netif_start(bp);

	netif_device_attach(dev);
}
static struct pci_error_handlers bnx2_err_handler = {
	.error_detected	= bnx2_io_error_detected,
	.slot_reset	= bnx2_io_slot_reset,
	.resume		= bnx2_io_resume,
};
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
	.err_handler	= &bnx2_err_handler,
};
static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}

static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}

module_init(bnx2_init);
module_exit(bnx2_cleanup);