1 /* bnx2.c: Broadcom NX2 network driver.
3 * Copyright (c) 2004-2007 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Written by: Michael Chan (mchan@broadcom.com)
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <asm/bitops.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
55 #define FW_BUF_SIZE 0x8000
57 #define DRV_MODULE_NAME "bnx2"
58 #define PFX DRV_MODULE_NAME ": "
59 #define DRV_MODULE_VERSION "1.6.5"
60 #define DRV_MODULE_RELDATE "September 20, 2007"
62 #define RUN_AT(x) (jiffies + (x))
64 /* Time in jiffies before concluding the transmitter is hung. */
65 #define TX_TIMEOUT (5*HZ)
67 static const char version[] __devinitdata =
68 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
70 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
71 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
72 MODULE_LICENSE("GPL");
73 MODULE_VERSION(DRV_MODULE_VERSION);
75 static int disable_msi = 0;
77 module_param(disable_msi, int, 0);
78 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
92 /* indexed by board_t, above */
95 } board_info[] __devinitdata = {
96 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
97 { "HP NC370T Multifunction Gigabit Server Adapter" },
98 { "HP NC370i Multifunction Gigabit Server Adapter" },
99 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
100 { "HP NC370F Multifunction Gigabit Server Adapter" },
101 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
102 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
103 { "Broadcom NetXtreme II BCM5709 1000Base-T" },
104 { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
107 static struct pci_device_id bnx2_pci_tbl[] = {
108 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
109 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
110 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
111 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
112 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
113 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
114 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
115 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
116 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
117 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
118 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
119 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
120 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
121 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
122 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
123 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
124 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
125 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
129 static struct flash_spec flash_table[] =
131 #define BUFFERED_FLAGS (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
132 #define NONBUFFERED_FLAGS (BNX2_NV_WREN)
134 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
135 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
136 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
138 /* Expansion entry 0001 */
139 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
140 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
141 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
143 /* Saifun SA25F010 (non-buffered flash) */
144 /* strap, cfg1, & write1 need updates */
145 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
146 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
147 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
148 "Non-buffered flash (128kB)"},
149 /* Saifun SA25F020 (non-buffered flash) */
150 /* strap, cfg1, & write1 need updates */
151 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
152 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
153 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
154 "Non-buffered flash (256kB)"},
155 /* Expansion entry 0100 */
156 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
157 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
158 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
160 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
161 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
162 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
163 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
164 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
165 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
166 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
167 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
168 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
169 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
170 /* Saifun SA25F005 (non-buffered flash) */
171 /* strap, cfg1, & write1 need updates */
172 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
173 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
174 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
175 "Non-buffered flash (64kB)"},
177 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
178 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
179 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
181 /* Expansion entry 1001 */
182 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
183 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
184 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
186 /* Expansion entry 1010 */
187 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
188 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
189 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
191 /* ATMEL AT45DB011B (buffered flash) */
192 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
193 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
194 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
195 "Buffered flash (128kB)"},
196 /* Expansion entry 1100 */
197 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
198 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
199 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
201 /* Expansion entry 1101 */
202 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
203 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
204 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
206 /* Ateml Expansion entry 1110 */
207 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
208 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
209 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
210 "Entry 1110 (Atmel)"},
211 /* ATMEL AT45DB021B (buffered flash) */
212 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
213 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
214 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
215 "Buffered flash (256kB)"},
218 static struct flash_spec flash_5709 = {
219 .flags = BNX2_NV_BUFFERED,
220 .page_bits = BCM5709_FLASH_PAGE_BITS,
221 .page_size = BCM5709_FLASH_PAGE_SIZE,
222 .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK,
223 .total_size = BUFFERED_FLASH_TOTAL_SIZE*2,
224 .name = "5709 Buffered flash (256kB)",
227 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
229 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
235 /* The ring uses 256 indices for 255 entries, one of them
236 * needs to be skipped.
238 diff = bp->tx_prod - bp->tx_cons;
239 if (unlikely(diff >= TX_DESC_CNT)) {
241 if (diff == TX_DESC_CNT)
242 diff = MAX_TX_DESC_CNT;
244 return (bp->tx_ring_size - diff);
248 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
252 spin_lock_bh(&bp->indirect_lock);
253 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
254 val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
255 spin_unlock_bh(&bp->indirect_lock);
260 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
262 spin_lock_bh(&bp->indirect_lock);
263 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
264 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
265 spin_unlock_bh(&bp->indirect_lock);
269 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
272 spin_lock_bh(&bp->indirect_lock);
273 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
276 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
277 REG_WR(bp, BNX2_CTX_CTX_CTRL,
278 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
279 for (i = 0; i < 5; i++) {
281 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
282 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
287 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
288 REG_WR(bp, BNX2_CTX_DATA, val);
290 spin_unlock_bh(&bp->indirect_lock);
294 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
299 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
300 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
301 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
303 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
304 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
309 val1 = (bp->phy_addr << 21) | (reg << 16) |
310 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
311 BNX2_EMAC_MDIO_COMM_START_BUSY;
312 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
314 for (i = 0; i < 50; i++) {
317 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
318 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
321 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
322 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
328 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
337 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
338 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
339 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
341 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
342 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
351 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
356 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
357 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
358 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
360 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
361 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
366 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
367 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
368 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
369 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
371 for (i = 0; i < 50; i++) {
374 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
375 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
381 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
386 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
387 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
388 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
390 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
391 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
400 bnx2_disable_int(struct bnx2 *bp)
402 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
403 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
404 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
408 bnx2_enable_int(struct bnx2 *bp)
410 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
411 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
412 BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);
414 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
415 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);
417 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
421 bnx2_disable_int_sync(struct bnx2 *bp)
423 atomic_inc(&bp->intr_sem);
424 bnx2_disable_int(bp);
425 synchronize_irq(bp->pdev->irq);
429 bnx2_netif_stop(struct bnx2 *bp)
431 bnx2_disable_int_sync(bp);
432 if (netif_running(bp->dev)) {
433 napi_disable(&bp->napi);
434 netif_tx_disable(bp->dev);
435 bp->dev->trans_start = jiffies; /* prevent tx timeout */
440 bnx2_netif_start(struct bnx2 *bp)
442 if (atomic_dec_and_test(&bp->intr_sem)) {
443 if (netif_running(bp->dev)) {
444 netif_wake_queue(bp->dev);
445 napi_enable(&bp->napi);
452 bnx2_free_mem(struct bnx2 *bp)
456 for (i = 0; i < bp->ctx_pages; i++) {
457 if (bp->ctx_blk[i]) {
458 pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
460 bp->ctx_blk_mapping[i]);
461 bp->ctx_blk[i] = NULL;
464 if (bp->status_blk) {
465 pci_free_consistent(bp->pdev, bp->status_stats_size,
466 bp->status_blk, bp->status_blk_mapping);
467 bp->status_blk = NULL;
468 bp->stats_blk = NULL;
470 if (bp->tx_desc_ring) {
471 pci_free_consistent(bp->pdev,
472 sizeof(struct tx_bd) * TX_DESC_CNT,
473 bp->tx_desc_ring, bp->tx_desc_mapping);
474 bp->tx_desc_ring = NULL;
476 kfree(bp->tx_buf_ring);
477 bp->tx_buf_ring = NULL;
478 for (i = 0; i < bp->rx_max_ring; i++) {
479 if (bp->rx_desc_ring[i])
480 pci_free_consistent(bp->pdev,
481 sizeof(struct rx_bd) * RX_DESC_CNT,
483 bp->rx_desc_mapping[i]);
484 bp->rx_desc_ring[i] = NULL;
486 vfree(bp->rx_buf_ring);
487 bp->rx_buf_ring = NULL;
491 bnx2_alloc_mem(struct bnx2 *bp)
493 int i, status_blk_size;
495 bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
497 if (bp->tx_buf_ring == NULL)
500 bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
501 sizeof(struct tx_bd) *
503 &bp->tx_desc_mapping);
504 if (bp->tx_desc_ring == NULL)
507 bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
509 if (bp->rx_buf_ring == NULL)
512 memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
515 for (i = 0; i < bp->rx_max_ring; i++) {
516 bp->rx_desc_ring[i] =
517 pci_alloc_consistent(bp->pdev,
518 sizeof(struct rx_bd) * RX_DESC_CNT,
519 &bp->rx_desc_mapping[i]);
520 if (bp->rx_desc_ring[i] == NULL)
525 /* Combine status and statistics blocks into one allocation. */
526 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
527 bp->status_stats_size = status_blk_size +
528 sizeof(struct statistics_block);
530 bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
531 &bp->status_blk_mapping);
532 if (bp->status_blk == NULL)
535 memset(bp->status_blk, 0, bp->status_stats_size);
537 bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
540 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
542 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
543 bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
544 if (bp->ctx_pages == 0)
546 for (i = 0; i < bp->ctx_pages; i++) {
547 bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
549 &bp->ctx_blk_mapping[i]);
550 if (bp->ctx_blk[i] == NULL)
562 bnx2_report_fw_link(struct bnx2 *bp)
564 u32 fw_link_status = 0;
566 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
572 switch (bp->line_speed) {
574 if (bp->duplex == DUPLEX_HALF)
575 fw_link_status = BNX2_LINK_STATUS_10HALF;
577 fw_link_status = BNX2_LINK_STATUS_10FULL;
580 if (bp->duplex == DUPLEX_HALF)
581 fw_link_status = BNX2_LINK_STATUS_100HALF;
583 fw_link_status = BNX2_LINK_STATUS_100FULL;
586 if (bp->duplex == DUPLEX_HALF)
587 fw_link_status = BNX2_LINK_STATUS_1000HALF;
589 fw_link_status = BNX2_LINK_STATUS_1000FULL;
592 if (bp->duplex == DUPLEX_HALF)
593 fw_link_status = BNX2_LINK_STATUS_2500HALF;
595 fw_link_status = BNX2_LINK_STATUS_2500FULL;
599 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
602 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
604 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
605 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
607 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
608 bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
609 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
611 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
615 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
617 REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
621 bnx2_xceiver_str(struct bnx2 *bp)
623 return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
624 ((bp->phy_flags & PHY_SERDES_FLAG) ? "Remote Copper" :
629 bnx2_report_link(struct bnx2 *bp)
632 netif_carrier_on(bp->dev);
633 printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
634 bnx2_xceiver_str(bp));
636 printk("%d Mbps ", bp->line_speed);
638 if (bp->duplex == DUPLEX_FULL)
639 printk("full duplex");
641 printk("half duplex");
644 if (bp->flow_ctrl & FLOW_CTRL_RX) {
645 printk(", receive ");
646 if (bp->flow_ctrl & FLOW_CTRL_TX)
647 printk("& transmit ");
650 printk(", transmit ");
652 printk("flow control ON");
657 netif_carrier_off(bp->dev);
658 printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
659 bnx2_xceiver_str(bp));
662 bnx2_report_fw_link(bp);
666 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
668 u32 local_adv, remote_adv;
671 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
672 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
674 if (bp->duplex == DUPLEX_FULL) {
675 bp->flow_ctrl = bp->req_flow_ctrl;
680 if (bp->duplex != DUPLEX_FULL) {
684 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
685 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
688 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
689 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
690 bp->flow_ctrl |= FLOW_CTRL_TX;
691 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
692 bp->flow_ctrl |= FLOW_CTRL_RX;
696 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
697 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
699 if (bp->phy_flags & PHY_SERDES_FLAG) {
700 u32 new_local_adv = 0;
701 u32 new_remote_adv = 0;
703 if (local_adv & ADVERTISE_1000XPAUSE)
704 new_local_adv |= ADVERTISE_PAUSE_CAP;
705 if (local_adv & ADVERTISE_1000XPSE_ASYM)
706 new_local_adv |= ADVERTISE_PAUSE_ASYM;
707 if (remote_adv & ADVERTISE_1000XPAUSE)
708 new_remote_adv |= ADVERTISE_PAUSE_CAP;
709 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
710 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
712 local_adv = new_local_adv;
713 remote_adv = new_remote_adv;
716 /* See Table 28B-3 of 802.3ab-1999 spec. */
717 if (local_adv & ADVERTISE_PAUSE_CAP) {
718 if(local_adv & ADVERTISE_PAUSE_ASYM) {
719 if (remote_adv & ADVERTISE_PAUSE_CAP) {
720 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
722 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
723 bp->flow_ctrl = FLOW_CTRL_RX;
727 if (remote_adv & ADVERTISE_PAUSE_CAP) {
728 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
732 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
733 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
734 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
736 bp->flow_ctrl = FLOW_CTRL_TX;
742 bnx2_5709s_linkup(struct bnx2 *bp)
748 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
749 bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
750 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
752 if ((bp->autoneg & AUTONEG_SPEED) == 0) {
753 bp->line_speed = bp->req_line_speed;
754 bp->duplex = bp->req_duplex;
757 speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
759 case MII_BNX2_GP_TOP_AN_SPEED_10:
760 bp->line_speed = SPEED_10;
762 case MII_BNX2_GP_TOP_AN_SPEED_100:
763 bp->line_speed = SPEED_100;
765 case MII_BNX2_GP_TOP_AN_SPEED_1G:
766 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
767 bp->line_speed = SPEED_1000;
769 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
770 bp->line_speed = SPEED_2500;
773 if (val & MII_BNX2_GP_TOP_AN_FD)
774 bp->duplex = DUPLEX_FULL;
776 bp->duplex = DUPLEX_HALF;
781 bnx2_5708s_linkup(struct bnx2 *bp)
786 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
787 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
788 case BCM5708S_1000X_STAT1_SPEED_10:
789 bp->line_speed = SPEED_10;
791 case BCM5708S_1000X_STAT1_SPEED_100:
792 bp->line_speed = SPEED_100;
794 case BCM5708S_1000X_STAT1_SPEED_1G:
795 bp->line_speed = SPEED_1000;
797 case BCM5708S_1000X_STAT1_SPEED_2G5:
798 bp->line_speed = SPEED_2500;
801 if (val & BCM5708S_1000X_STAT1_FD)
802 bp->duplex = DUPLEX_FULL;
804 bp->duplex = DUPLEX_HALF;
810 bnx2_5706s_linkup(struct bnx2 *bp)
812 u32 bmcr, local_adv, remote_adv, common;
815 bp->line_speed = SPEED_1000;
817 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
818 if (bmcr & BMCR_FULLDPLX) {
819 bp->duplex = DUPLEX_FULL;
822 bp->duplex = DUPLEX_HALF;
825 if (!(bmcr & BMCR_ANENABLE)) {
829 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
830 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
832 common = local_adv & remote_adv;
833 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
835 if (common & ADVERTISE_1000XFULL) {
836 bp->duplex = DUPLEX_FULL;
839 bp->duplex = DUPLEX_HALF;
847 bnx2_copper_linkup(struct bnx2 *bp)
851 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
852 if (bmcr & BMCR_ANENABLE) {
853 u32 local_adv, remote_adv, common;
855 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
856 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
858 common = local_adv & (remote_adv >> 2);
859 if (common & ADVERTISE_1000FULL) {
860 bp->line_speed = SPEED_1000;
861 bp->duplex = DUPLEX_FULL;
863 else if (common & ADVERTISE_1000HALF) {
864 bp->line_speed = SPEED_1000;
865 bp->duplex = DUPLEX_HALF;
868 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
869 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
871 common = local_adv & remote_adv;
872 if (common & ADVERTISE_100FULL) {
873 bp->line_speed = SPEED_100;
874 bp->duplex = DUPLEX_FULL;
876 else if (common & ADVERTISE_100HALF) {
877 bp->line_speed = SPEED_100;
878 bp->duplex = DUPLEX_HALF;
880 else if (common & ADVERTISE_10FULL) {
881 bp->line_speed = SPEED_10;
882 bp->duplex = DUPLEX_FULL;
884 else if (common & ADVERTISE_10HALF) {
885 bp->line_speed = SPEED_10;
886 bp->duplex = DUPLEX_HALF;
895 if (bmcr & BMCR_SPEED100) {
896 bp->line_speed = SPEED_100;
899 bp->line_speed = SPEED_10;
901 if (bmcr & BMCR_FULLDPLX) {
902 bp->duplex = DUPLEX_FULL;
905 bp->duplex = DUPLEX_HALF;
913 bnx2_set_mac_link(struct bnx2 *bp)
917 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
918 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
919 (bp->duplex == DUPLEX_HALF)) {
920 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
923 /* Configure the EMAC mode register. */
924 val = REG_RD(bp, BNX2_EMAC_MODE);
926 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
927 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
928 BNX2_EMAC_MODE_25G_MODE);
931 switch (bp->line_speed) {
933 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
934 val |= BNX2_EMAC_MODE_PORT_MII_10M;
939 val |= BNX2_EMAC_MODE_PORT_MII;
942 val |= BNX2_EMAC_MODE_25G_MODE;
945 val |= BNX2_EMAC_MODE_PORT_GMII;
950 val |= BNX2_EMAC_MODE_PORT_GMII;
953 /* Set the MAC to operate in the appropriate duplex mode. */
954 if (bp->duplex == DUPLEX_HALF)
955 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
956 REG_WR(bp, BNX2_EMAC_MODE, val);
958 /* Enable/disable rx PAUSE. */
959 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
961 if (bp->flow_ctrl & FLOW_CTRL_RX)
962 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
963 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
965 /* Enable/disable tx PAUSE. */
966 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
967 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
969 if (bp->flow_ctrl & FLOW_CTRL_TX)
970 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
971 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
973 /* Acknowledge the interrupt. */
974 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
980 bnx2_enable_bmsr1(struct bnx2 *bp)
982 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
983 (CHIP_NUM(bp) == CHIP_NUM_5709))
984 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
985 MII_BNX2_BLK_ADDR_GP_STATUS);
989 bnx2_disable_bmsr1(struct bnx2 *bp)
991 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
992 (CHIP_NUM(bp) == CHIP_NUM_5709))
993 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
994 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
998 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1003 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1006 if (bp->autoneg & AUTONEG_SPEED)
1007 bp->advertising |= ADVERTISED_2500baseX_Full;
1009 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1010 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1012 bnx2_read_phy(bp, bp->mii_up1, &up1);
1013 if (!(up1 & BCM5708S_UP1_2G5)) {
1014 up1 |= BCM5708S_UP1_2G5;
1015 bnx2_write_phy(bp, bp->mii_up1, up1);
1019 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1020 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1021 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1027 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1032 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1035 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1036 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1038 bnx2_read_phy(bp, bp->mii_up1, &up1);
1039 if (up1 & BCM5708S_UP1_2G5) {
1040 up1 &= ~BCM5708S_UP1_2G5;
1041 bnx2_write_phy(bp, bp->mii_up1, up1);
1045 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1046 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1047 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1053 bnx2_enable_forced_2g5(struct bnx2 *bp)
1057 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1060 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1063 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1064 MII_BNX2_BLK_ADDR_SERDES_DIG);
1065 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1066 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1067 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1068 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1070 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1071 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1072 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1074 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1075 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1076 bmcr |= BCM5708S_BMCR_FORCE_2500;
1079 if (bp->autoneg & AUTONEG_SPEED) {
1080 bmcr &= ~BMCR_ANENABLE;
1081 if (bp->req_duplex == DUPLEX_FULL)
1082 bmcr |= BMCR_FULLDPLX;
1084 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1088 bnx2_disable_forced_2g5(struct bnx2 *bp)
1092 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1095 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1098 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1099 MII_BNX2_BLK_ADDR_SERDES_DIG);
1100 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1101 val &= ~MII_BNX2_SD_MISC1_FORCE;
1102 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1104 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1105 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1106 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1108 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1109 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1110 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1113 if (bp->autoneg & AUTONEG_SPEED)
1114 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1115 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1119 bnx2_set_link(struct bnx2 *bp)
1124 if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
1129 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1132 link_up = bp->link_up;
1134 bnx2_enable_bmsr1(bp);
1135 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1136 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1137 bnx2_disable_bmsr1(bp);
1139 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1140 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
1143 val = REG_RD(bp, BNX2_EMAC_STATUS);
1144 if (val & BNX2_EMAC_STATUS_LINK)
1145 bmsr |= BMSR_LSTATUS;
1147 bmsr &= ~BMSR_LSTATUS;
1150 if (bmsr & BMSR_LSTATUS) {
1153 if (bp->phy_flags & PHY_SERDES_FLAG) {
1154 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1155 bnx2_5706s_linkup(bp);
1156 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1157 bnx2_5708s_linkup(bp);
1158 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1159 bnx2_5709s_linkup(bp);
1162 bnx2_copper_linkup(bp);
1164 bnx2_resolve_flow_ctrl(bp);
1167 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1168 (bp->autoneg & AUTONEG_SPEED))
1169 bnx2_disable_forced_2g5(bp);
1171 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1175 if (bp->link_up != link_up) {
1176 bnx2_report_link(bp);
1179 bnx2_set_mac_link(bp);
1185 bnx2_reset_phy(struct bnx2 *bp)
1190 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1192 #define PHY_RESET_MAX_WAIT 100
1193 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1196 bnx2_read_phy(bp, bp->mii_bmcr, ®);
1197 if (!(reg & BMCR_RESET)) {
1202 if (i == PHY_RESET_MAX_WAIT) {
1209 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1213 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1214 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1216 if (bp->phy_flags & PHY_SERDES_FLAG) {
1217 adv = ADVERTISE_1000XPAUSE;
1220 adv = ADVERTISE_PAUSE_CAP;
1223 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1224 if (bp->phy_flags & PHY_SERDES_FLAG) {
1225 adv = ADVERTISE_1000XPSE_ASYM;
1228 adv = ADVERTISE_PAUSE_ASYM;
1231 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1232 if (bp->phy_flags & PHY_SERDES_FLAG) {
1233 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1236 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1242 static int bnx2_fw_sync(struct bnx2 *, u32, int);
1245 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1247 u32 speed_arg = 0, pause_adv;
1249 pause_adv = bnx2_phy_get_pause_adv(bp);
1251 if (bp->autoneg & AUTONEG_SPEED) {
1252 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1253 if (bp->advertising & ADVERTISED_10baseT_Half)
1254 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1255 if (bp->advertising & ADVERTISED_10baseT_Full)
1256 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1257 if (bp->advertising & ADVERTISED_100baseT_Half)
1258 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1259 if (bp->advertising & ADVERTISED_100baseT_Full)
1260 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1261 if (bp->advertising & ADVERTISED_1000baseT_Full)
1262 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1263 if (bp->advertising & ADVERTISED_2500baseX_Full)
1264 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1266 if (bp->req_line_speed == SPEED_2500)
1267 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1268 else if (bp->req_line_speed == SPEED_1000)
1269 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1270 else if (bp->req_line_speed == SPEED_100) {
1271 if (bp->req_duplex == DUPLEX_FULL)
1272 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1274 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1275 } else if (bp->req_line_speed == SPEED_10) {
1276 if (bp->req_duplex == DUPLEX_FULL)
1277 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1279 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1283 if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1284 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1285 if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_1000XPSE_ASYM))
1286 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1288 if (port == PORT_TP)
1289 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1290 BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1292 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);
1294 spin_unlock_bh(&bp->phy_lock);
1295 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
1296 spin_lock_bh(&bp->phy_lock);
1302 bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
1307 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1308 return (bnx2_setup_remote_phy(bp, port));
1310 if (!(bp->autoneg & AUTONEG_SPEED)) {
1312 int force_link_down = 0;
1314 if (bp->req_line_speed == SPEED_2500) {
1315 if (!bnx2_test_and_enable_2g5(bp))
1316 force_link_down = 1;
1317 } else if (bp->req_line_speed == SPEED_1000) {
1318 if (bnx2_test_and_disable_2g5(bp))
1319 force_link_down = 1;
1321 bnx2_read_phy(bp, bp->mii_adv, &adv);
1322 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1324 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1325 new_bmcr = bmcr & ~BMCR_ANENABLE;
1326 new_bmcr |= BMCR_SPEED1000;
1328 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1329 if (bp->req_line_speed == SPEED_2500)
1330 bnx2_enable_forced_2g5(bp);
1331 else if (bp->req_line_speed == SPEED_1000) {
1332 bnx2_disable_forced_2g5(bp);
1333 new_bmcr &= ~0x2000;
1336 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1337 if (bp->req_line_speed == SPEED_2500)
1338 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1340 new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
1343 if (bp->req_duplex == DUPLEX_FULL) {
1344 adv |= ADVERTISE_1000XFULL;
1345 new_bmcr |= BMCR_FULLDPLX;
1348 adv |= ADVERTISE_1000XHALF;
1349 new_bmcr &= ~BMCR_FULLDPLX;
1351 if ((new_bmcr != bmcr) || (force_link_down)) {
1352 /* Force a link down visible on the other side */
1354 bnx2_write_phy(bp, bp->mii_adv, adv &
1355 ~(ADVERTISE_1000XFULL |
1356 ADVERTISE_1000XHALF));
1357 bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
1358 BMCR_ANRESTART | BMCR_ANENABLE);
1361 netif_carrier_off(bp->dev);
1362 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1363 bnx2_report_link(bp);
1365 bnx2_write_phy(bp, bp->mii_adv, adv);
1366 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1368 bnx2_resolve_flow_ctrl(bp);
1369 bnx2_set_mac_link(bp);
1374 bnx2_test_and_enable_2g5(bp);
1376 if (bp->advertising & ADVERTISED_1000baseT_Full)
1377 new_adv |= ADVERTISE_1000XFULL;
1379 new_adv |= bnx2_phy_get_pause_adv(bp);
1381 bnx2_read_phy(bp, bp->mii_adv, &adv);
1382 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1384 bp->serdes_an_pending = 0;
1385 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1386 /* Force a link down visible on the other side */
1388 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1389 spin_unlock_bh(&bp->phy_lock);
1391 spin_lock_bh(&bp->phy_lock);
1394 bnx2_write_phy(bp, bp->mii_adv, new_adv);
1395 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
1397 /* Speed up link-up time when the link partner
1398 * does not autonegotiate which is very common
1399 * in blade servers. Some blade servers use
1400 * IPMI for kerboard input and it's important
1401 * to minimize link disruptions. Autoneg. involves
1402 * exchanging base pages plus 3 next pages and
1403 * normally completes in about 120 msec.
1405 bp->current_interval = SERDES_AN_TIMEOUT;
1406 bp->serdes_an_pending = 1;
1407 mod_timer(&bp->timer, jiffies + bp->current_interval);
1409 bnx2_resolve_flow_ctrl(bp);
1410 bnx2_set_mac_link(bp);
1416 #define ETHTOOL_ALL_FIBRE_SPEED \
1417 (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ? \
1418 (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1419 (ADVERTISED_1000baseT_Full)
1421 #define ETHTOOL_ALL_COPPER_SPEED \
1422 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1423 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1424 ADVERTISED_1000baseT_Full)
1426 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1427 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1429 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1432 bnx2_set_default_remote_link(struct bnx2 *bp)
1436 if (bp->phy_port == PORT_TP)
1437 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
1439 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);
1441 if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1442 bp->req_line_speed = 0;
1443 bp->autoneg |= AUTONEG_SPEED;
1444 bp->advertising = ADVERTISED_Autoneg;
1445 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1446 bp->advertising |= ADVERTISED_10baseT_Half;
1447 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1448 bp->advertising |= ADVERTISED_10baseT_Full;
1449 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1450 bp->advertising |= ADVERTISED_100baseT_Half;
1451 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1452 bp->advertising |= ADVERTISED_100baseT_Full;
1453 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1454 bp->advertising |= ADVERTISED_1000baseT_Full;
1455 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1456 bp->advertising |= ADVERTISED_2500baseX_Full;
1459 bp->advertising = 0;
1460 bp->req_duplex = DUPLEX_FULL;
1461 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1462 bp->req_line_speed = SPEED_10;
1463 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1464 bp->req_duplex = DUPLEX_HALF;
1466 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1467 bp->req_line_speed = SPEED_100;
1468 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1469 bp->req_duplex = DUPLEX_HALF;
1471 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1472 bp->req_line_speed = SPEED_1000;
1473 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1474 bp->req_line_speed = SPEED_2500;
1479 bnx2_set_default_link(struct bnx2 *bp)
1481 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1482 return bnx2_set_default_remote_link(bp);
1484 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1485 bp->req_line_speed = 0;
1486 if (bp->phy_flags & PHY_SERDES_FLAG) {
1489 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1491 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
1492 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1493 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1495 bp->req_line_speed = bp->line_speed = SPEED_1000;
1496 bp->req_duplex = DUPLEX_FULL;
1499 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1503 bnx2_send_heart_beat(struct bnx2 *bp)
1508 spin_lock(&bp->indirect_lock);
1509 msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1510 addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1511 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1512 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1513 spin_unlock(&bp->indirect_lock);
1517 bnx2_remote_phy_event(struct bnx2 *bp)
1520 u8 link_up = bp->link_up;
1523 msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
1525 if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1526 bnx2_send_heart_beat(bp);
1528 msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1530 if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1536 speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1537 bp->duplex = DUPLEX_FULL;
1539 case BNX2_LINK_STATUS_10HALF:
1540 bp->duplex = DUPLEX_HALF;
1541 case BNX2_LINK_STATUS_10FULL:
1542 bp->line_speed = SPEED_10;
1544 case BNX2_LINK_STATUS_100HALF:
1545 bp->duplex = DUPLEX_HALF;
1546 case BNX2_LINK_STATUS_100BASE_T4:
1547 case BNX2_LINK_STATUS_100FULL:
1548 bp->line_speed = SPEED_100;
1550 case BNX2_LINK_STATUS_1000HALF:
1551 bp->duplex = DUPLEX_HALF;
1552 case BNX2_LINK_STATUS_1000FULL:
1553 bp->line_speed = SPEED_1000;
1555 case BNX2_LINK_STATUS_2500HALF:
1556 bp->duplex = DUPLEX_HALF;
1557 case BNX2_LINK_STATUS_2500FULL:
1558 bp->line_speed = SPEED_2500;
1565 spin_lock(&bp->phy_lock);
1567 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1568 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1569 if (bp->duplex == DUPLEX_FULL)
1570 bp->flow_ctrl = bp->req_flow_ctrl;
1572 if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
1573 bp->flow_ctrl |= FLOW_CTRL_TX;
1574 if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
1575 bp->flow_ctrl |= FLOW_CTRL_RX;
1578 old_port = bp->phy_port;
1579 if (msg & BNX2_LINK_STATUS_SERDES_LINK)
1580 bp->phy_port = PORT_FIBRE;
1582 bp->phy_port = PORT_TP;
1584 if (old_port != bp->phy_port)
1585 bnx2_set_default_link(bp);
1587 spin_unlock(&bp->phy_lock);
1589 if (bp->link_up != link_up)
1590 bnx2_report_link(bp);
1592 bnx2_set_mac_link(bp);
1596 bnx2_set_remote_link(struct bnx2 *bp)
1600 evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
1602 case BNX2_FW_EVT_CODE_LINK_EVENT:
1603 bnx2_remote_phy_event(bp);
1605 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1607 bnx2_send_heart_beat(bp);
1614 bnx2_setup_copper_phy(struct bnx2 *bp)
1619 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1621 if (bp->autoneg & AUTONEG_SPEED) {
1622 u32 adv_reg, adv1000_reg;
1623 u32 new_adv_reg = 0;
1624 u32 new_adv1000_reg = 0;
1626 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
1627 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1628 ADVERTISE_PAUSE_ASYM);
1630 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1631 adv1000_reg &= PHY_ALL_1000_SPEED;
1633 if (bp->advertising & ADVERTISED_10baseT_Half)
1634 new_adv_reg |= ADVERTISE_10HALF;
1635 if (bp->advertising & ADVERTISED_10baseT_Full)
1636 new_adv_reg |= ADVERTISE_10FULL;
1637 if (bp->advertising & ADVERTISED_100baseT_Half)
1638 new_adv_reg |= ADVERTISE_100HALF;
1639 if (bp->advertising & ADVERTISED_100baseT_Full)
1640 new_adv_reg |= ADVERTISE_100FULL;
1641 if (bp->advertising & ADVERTISED_1000baseT_Full)
1642 new_adv1000_reg |= ADVERTISE_1000FULL;
1644 new_adv_reg |= ADVERTISE_CSMA;
1646 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1648 if ((adv1000_reg != new_adv1000_reg) ||
1649 (adv_reg != new_adv_reg) ||
1650 ((bmcr & BMCR_ANENABLE) == 0)) {
1652 bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
1653 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
1654 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
1657 else if (bp->link_up) {
1658 /* Flow ctrl may have changed from auto to forced */
1659 /* or vice-versa. */
1661 bnx2_resolve_flow_ctrl(bp);
1662 bnx2_set_mac_link(bp);
1668 if (bp->req_line_speed == SPEED_100) {
1669 new_bmcr |= BMCR_SPEED100;
1671 if (bp->req_duplex == DUPLEX_FULL) {
1672 new_bmcr |= BMCR_FULLDPLX;
1674 if (new_bmcr != bmcr) {
1677 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1678 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1680 if (bmsr & BMSR_LSTATUS) {
1681 /* Force link down */
1682 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1683 spin_unlock_bh(&bp->phy_lock);
1685 spin_lock_bh(&bp->phy_lock);
1687 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1688 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1691 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1693 /* Normally, the new speed is setup after the link has
1694 * gone down and up again. In some cases, link will not go
1695 * down so we need to set up the new speed here.
1697 if (bmsr & BMSR_LSTATUS) {
1698 bp->line_speed = bp->req_line_speed;
1699 bp->duplex = bp->req_duplex;
1700 bnx2_resolve_flow_ctrl(bp);
1701 bnx2_set_mac_link(bp);
1704 bnx2_resolve_flow_ctrl(bp);
1705 bnx2_set_mac_link(bp);
1711 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1713 if (bp->loopback == MAC_LOOPBACK)
1716 if (bp->phy_flags & PHY_SERDES_FLAG) {
1717 return (bnx2_setup_serdes_phy(bp, port));
1720 return (bnx2_setup_copper_phy(bp));
1725 bnx2_init_5709s_phy(struct bnx2 *bp)
1729 bp->mii_bmcr = MII_BMCR + 0x10;
1730 bp->mii_bmsr = MII_BMSR + 0x10;
1731 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
1732 bp->mii_adv = MII_ADVERTISE + 0x10;
1733 bp->mii_lpa = MII_LPA + 0x10;
1734 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
1736 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
1737 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
1739 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1742 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
1744 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
1745 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
1746 val |= MII_BNX2_SD_1000XCTL1_FIBER;
1747 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
1749 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1750 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
1751 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
1752 val |= BCM5708S_UP1_2G5;
1754 val &= ~BCM5708S_UP1_2G5;
1755 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
1757 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
1758 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
1759 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
1760 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
1762 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
1764 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
1765 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
1766 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
1768 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1774 bnx2_init_5708s_phy(struct bnx2 *bp)
1780 bp->mii_up1 = BCM5708S_UP1;
1782 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1783 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1784 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1786 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1787 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1788 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1790 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1791 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1792 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1794 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1795 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1796 val |= BCM5708S_UP1_2G5;
1797 bnx2_write_phy(bp, BCM5708S_UP1, val);
1800 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
1801 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1802 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
1803 /* increase tx signal amplitude */
1804 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1805 BCM5708S_BLK_ADDR_TX_MISC);
1806 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1807 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1808 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1809 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1812 val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
1813 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1818 is_backplane = REG_RD_IND(bp, bp->shmem_base +
1819 BNX2_SHARED_HW_CFG_CONFIG);
1820 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1821 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1822 BCM5708S_BLK_ADDR_TX_MISC);
1823 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1824 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1825 BCM5708S_BLK_ADDR_DIG);
1832 bnx2_init_5706s_phy(struct bnx2 *bp)
1836 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1838 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1839 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
1841 if (bp->dev->mtu > 1500) {
1844 /* Set extended packet length bit */
1845 bnx2_write_phy(bp, 0x18, 0x7);
1846 bnx2_read_phy(bp, 0x18, &val);
1847 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1849 bnx2_write_phy(bp, 0x1c, 0x6c00);
1850 bnx2_read_phy(bp, 0x1c, &val);
1851 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1856 bnx2_write_phy(bp, 0x18, 0x7);
1857 bnx2_read_phy(bp, 0x18, &val);
1858 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1860 bnx2_write_phy(bp, 0x1c, 0x6c00);
1861 bnx2_read_phy(bp, 0x1c, &val);
1862 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
1869 bnx2_init_copper_phy(struct bnx2 *bp)
1875 if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1876 bnx2_write_phy(bp, 0x18, 0x0c00);
1877 bnx2_write_phy(bp, 0x17, 0x000a);
1878 bnx2_write_phy(bp, 0x15, 0x310b);
1879 bnx2_write_phy(bp, 0x17, 0x201f);
1880 bnx2_write_phy(bp, 0x15, 0x9506);
1881 bnx2_write_phy(bp, 0x17, 0x401f);
1882 bnx2_write_phy(bp, 0x15, 0x14e2);
1883 bnx2_write_phy(bp, 0x18, 0x0400);
1886 if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
1887 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
1888 MII_BNX2_DSP_EXPAND_REG | 0x8);
1889 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1891 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
1894 if (bp->dev->mtu > 1500) {
1895 /* Set extended packet length bit */
1896 bnx2_write_phy(bp, 0x18, 0x7);
1897 bnx2_read_phy(bp, 0x18, &val);
1898 bnx2_write_phy(bp, 0x18, val | 0x4000);
1900 bnx2_read_phy(bp, 0x10, &val);
1901 bnx2_write_phy(bp, 0x10, val | 0x1);
1904 bnx2_write_phy(bp, 0x18, 0x7);
1905 bnx2_read_phy(bp, 0x18, &val);
1906 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1908 bnx2_read_phy(bp, 0x10, &val);
1909 bnx2_write_phy(bp, 0x10, val & ~0x1);
1912 /* ethernet@wirespeed */
1913 bnx2_write_phy(bp, 0x18, 0x7007);
1914 bnx2_read_phy(bp, 0x18, &val);
1915 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
1921 bnx2_init_phy(struct bnx2 *bp)
1926 bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1927 bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1929 bp->mii_bmcr = MII_BMCR;
1930 bp->mii_bmsr = MII_BMSR;
1931 bp->mii_bmsr1 = MII_BMSR;
1932 bp->mii_adv = MII_ADVERTISE;
1933 bp->mii_lpa = MII_LPA;
1935 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1937 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1940 bnx2_read_phy(bp, MII_PHYSID1, &val);
1941 bp->phy_id = val << 16;
1942 bnx2_read_phy(bp, MII_PHYSID2, &val);
1943 bp->phy_id |= val & 0xffff;
1945 if (bp->phy_flags & PHY_SERDES_FLAG) {
1946 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1947 rc = bnx2_init_5706s_phy(bp);
1948 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1949 rc = bnx2_init_5708s_phy(bp);
1950 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1951 rc = bnx2_init_5709s_phy(bp);
1954 rc = bnx2_init_copper_phy(bp);
1959 rc = bnx2_setup_phy(bp, bp->phy_port);
1965 bnx2_set_mac_loopback(struct bnx2 *bp)
1969 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1970 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1971 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1972 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1977 static int bnx2_test_link(struct bnx2 *);
1980 bnx2_set_phy_loopback(struct bnx2 *bp)
1985 spin_lock_bh(&bp->phy_lock);
1986 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
1988 spin_unlock_bh(&bp->phy_lock);
1992 for (i = 0; i < 10; i++) {
1993 if (bnx2_test_link(bp) == 0)
1998 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1999 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2000 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2001 BNX2_EMAC_MODE_25G_MODE);
2003 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2004 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2010 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
2016 msg_data |= bp->fw_wr_seq;
2018 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
2020 /* wait for an acknowledgement. */
2021 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
2024 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
2026 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2029 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2032 /* If we timed out, inform the firmware that this is the case. */
2033 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2035 printk(KERN_ERR PFX "fw sync timeout, reset code = "
2038 msg_data &= ~BNX2_DRV_MSG_CODE;
2039 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2041 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
2046 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2053 bnx2_init_5709_context(struct bnx2 *bp)
2058 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2059 val |= (BCM_PAGE_BITS - 8) << 16;
2060 REG_WR(bp, BNX2_CTX_COMMAND, val);
2061 for (i = 0; i < 10; i++) {
2062 val = REG_RD(bp, BNX2_CTX_COMMAND);
2063 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2067 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2070 for (i = 0; i < bp->ctx_pages; i++) {
2073 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2074 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2075 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2076 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2077 (u64) bp->ctx_blk_mapping[i] >> 32);
2078 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2079 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2080 for (j = 0; j < 10; j++) {
2082 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2083 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2087 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2096 bnx2_init_context(struct bnx2 *bp)
2102 u32 vcid_addr, pcid_addr, offset;
2107 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2110 vcid_addr = GET_PCID_ADDR(vcid);
2112 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2117 pcid_addr = GET_PCID_ADDR(new_vcid);
2120 vcid_addr = GET_CID_ADDR(vcid);
2121 pcid_addr = vcid_addr;
2124 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2125 vcid_addr += (i << PHY_CTX_SHIFT);
2126 pcid_addr += (i << PHY_CTX_SHIFT);
2128 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
2129 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2131 /* Zero out the context. */
2132 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2133 CTX_WR(bp, 0x00, offset, 0);
2135 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2136 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2142 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2148 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2149 if (good_mbuf == NULL) {
2150 printk(KERN_ERR PFX "Failed to allocate memory in "
2151 "bnx2_alloc_bad_rbuf\n");
2155 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2156 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2160 /* Allocate a bunch of mbufs and save the good ones in an array. */
2161 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
2162 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2163 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
2165 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
2167 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2169 /* The addresses with Bit 9 set are bad memory blocks. */
2170 if (!(val & (1 << 9))) {
2171 good_mbuf[good_mbuf_cnt] = (u16) val;
2175 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
2178 /* Free the good ones back to the mbuf pool thus discarding
2179 * all the bad ones. */
2180 while (good_mbuf_cnt) {
2183 val = good_mbuf[good_mbuf_cnt];
2184 val = (val << 9) | val | 1;
2186 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
2193 bnx2_set_mac_addr(struct bnx2 *bp)
2196 u8 *mac_addr = bp->dev->dev_addr;
2198 val = (mac_addr[0] << 8) | mac_addr[1];
2200 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2202 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2203 (mac_addr[4] << 8) | mac_addr[5];
2205 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2209 bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
2211 struct sk_buff *skb;
2212 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
2214 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2215 unsigned long align;
2217 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
2222 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2223 skb_reserve(skb, BNX2_RX_ALIGN - align);
2225 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2226 PCI_DMA_FROMDEVICE);
2229 pci_unmap_addr_set(rx_buf, mapping, mapping);
2231 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2232 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2234 bp->rx_prod_bseq += bp->rx_buf_use_size;
2240 bnx2_phy_event_is_set(struct bnx2 *bp, u32 event)
2242 struct status_block *sblk = bp->status_blk;
2243 u32 new_link_state, old_link_state;
2246 new_link_state = sblk->status_attn_bits & event;
2247 old_link_state = sblk->status_attn_bits_ack & event;
2248 if (new_link_state != old_link_state) {
2250 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2252 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2260 bnx2_phy_int(struct bnx2 *bp)
2262 if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_LINK_STATE)) {
2263 spin_lock(&bp->phy_lock);
2265 spin_unlock(&bp->phy_lock);
2267 if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_TIMER_ABORT))
2268 bnx2_set_remote_link(bp);
2273 bnx2_tx_int(struct bnx2 *bp)
2275 struct status_block *sblk = bp->status_blk;
2276 u16 hw_cons, sw_cons, sw_ring_cons;
2279 hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
2280 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
2283 sw_cons = bp->tx_cons;
2285 while (sw_cons != hw_cons) {
2286 struct sw_bd *tx_buf;
2287 struct sk_buff *skb;
2290 sw_ring_cons = TX_RING_IDX(sw_cons);
2292 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
2295 /* partial BD completions possible with TSO packets */
2296 if (skb_is_gso(skb)) {
2297 u16 last_idx, last_ring_idx;
2299 last_idx = sw_cons +
2300 skb_shinfo(skb)->nr_frags + 1;
2301 last_ring_idx = sw_ring_cons +
2302 skb_shinfo(skb)->nr_frags + 1;
2303 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2306 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2311 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
2312 skb_headlen(skb), PCI_DMA_TODEVICE);
2315 last = skb_shinfo(skb)->nr_frags;
2317 for (i = 0; i < last; i++) {
2318 sw_cons = NEXT_TX_BD(sw_cons);
2320 pci_unmap_page(bp->pdev,
2322 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
2324 skb_shinfo(skb)->frags[i].size,
2328 sw_cons = NEXT_TX_BD(sw_cons);
2330 tx_free_bd += last + 1;
2334 hw_cons = bp->hw_tx_cons =
2335 sblk->status_tx_quick_consumer_index0;
2337 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
2342 bp->tx_cons = sw_cons;
2343 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2344 * before checking for netif_queue_stopped(). Without the
2345 * memory barrier, there is a small possibility that bnx2_start_xmit()
2346 * will miss it and cause the queue to be stopped forever.
2350 if (unlikely(netif_queue_stopped(bp->dev)) &&
2351 (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
2352 netif_tx_lock(bp->dev);
2353 if ((netif_queue_stopped(bp->dev)) &&
2354 (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
2355 netif_wake_queue(bp->dev);
2356 netif_tx_unlock(bp->dev);
2361 bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
2364 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2365 struct rx_bd *cons_bd, *prod_bd;
2367 cons_rx_buf = &bp->rx_buf_ring[cons];
2368 prod_rx_buf = &bp->rx_buf_ring[prod];
2370 pci_dma_sync_single_for_device(bp->pdev,
2371 pci_unmap_addr(cons_rx_buf, mapping),
2372 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2374 bp->rx_prod_bseq += bp->rx_buf_use_size;
2376 prod_rx_buf->skb = skb;
2381 pci_unmap_addr_set(prod_rx_buf, mapping,
2382 pci_unmap_addr(cons_rx_buf, mapping));
2384 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2385 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2386 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2387 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2391 bnx2_rx_int(struct bnx2 *bp, int budget)
2393 struct status_block *sblk = bp->status_blk;
2394 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2395 struct l2_fhdr *rx_hdr;
2398 hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
2399 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
2402 sw_cons = bp->rx_cons;
2403 sw_prod = bp->rx_prod;
2405 /* Memory barrier necessary as speculative reads of the rx
2406 * buffer can be ahead of the index in the status block
2409 while (sw_cons != hw_cons) {
2412 struct sw_bd *rx_buf;
2413 struct sk_buff *skb;
2414 dma_addr_t dma_addr;
2416 sw_ring_cons = RX_RING_IDX(sw_cons);
2417 sw_ring_prod = RX_RING_IDX(sw_prod);
2419 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
2424 dma_addr = pci_unmap_addr(rx_buf, mapping);
2426 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
2427 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2429 rx_hdr = (struct l2_fhdr *) skb->data;
2430 len = rx_hdr->l2_fhdr_pkt_len - 4;
2432 if ((status = rx_hdr->l2_fhdr_status) &
2433 (L2_FHDR_ERRORS_BAD_CRC |
2434 L2_FHDR_ERRORS_PHY_DECODE |
2435 L2_FHDR_ERRORS_ALIGNMENT |
2436 L2_FHDR_ERRORS_TOO_SHORT |
2437 L2_FHDR_ERRORS_GIANT_FRAME)) {
2442 /* Since we don't have a jumbo ring, copy small packets
2445 if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
2446 struct sk_buff *new_skb;
2448 new_skb = netdev_alloc_skb(bp->dev, len + 2);
2449 if (new_skb == NULL)
2453 skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
2454 new_skb->data, len + 2);
2455 skb_reserve(new_skb, 2);
2456 skb_put(new_skb, len);
2458 bnx2_reuse_rx_skb(bp, skb,
2459 sw_ring_cons, sw_ring_prod);
2463 else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
2464 pci_unmap_single(bp->pdev, dma_addr,
2465 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
2467 skb_reserve(skb, bp->rx_offset);
2472 bnx2_reuse_rx_skb(bp, skb,
2473 sw_ring_cons, sw_ring_prod);
2477 skb->protocol = eth_type_trans(skb, bp->dev);
2479 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
2480 (ntohs(skb->protocol) != 0x8100)) {
2487 skb->ip_summed = CHECKSUM_NONE;
2489 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
2490 L2_FHDR_STATUS_UDP_DATAGRAM))) {
2492 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
2493 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
2494 skb->ip_summed = CHECKSUM_UNNECESSARY;
2498 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
2499 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
2500 rx_hdr->l2_fhdr_vlan_tag);
2504 netif_receive_skb(skb);
2506 bp->dev->last_rx = jiffies;
2510 sw_cons = NEXT_RX_BD(sw_cons);
2511 sw_prod = NEXT_RX_BD(sw_prod);
2513 if ((rx_pkt == budget))
2516 /* Refresh hw_cons to see if there is new work */
2517 if (sw_cons == hw_cons) {
2518 hw_cons = bp->hw_rx_cons =
2519 sblk->status_rx_quick_consumer_index0;
2520 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
2525 bp->rx_cons = sw_cons;
2526 bp->rx_prod = sw_prod;
2528 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
2530 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
2538 /* MSI ISR - The only difference between this and the INTx ISR
2539 * is that the MSI interrupt is always serviced.
2542 bnx2_msi(int irq, void *dev_instance)
2544 struct net_device *dev = dev_instance;
2545 struct bnx2 *bp = netdev_priv(dev);
2547 prefetch(bp->status_blk);
2548 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2549 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2550 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2552 /* Return here if interrupt is disabled. */
2553 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2556 netif_rx_schedule(dev, &bp->napi);
2562 bnx2_msi_1shot(int irq, void *dev_instance)
2564 struct net_device *dev = dev_instance;
2565 struct bnx2 *bp = netdev_priv(dev);
2567 prefetch(bp->status_blk);
2569 /* Return here if interrupt is disabled. */
2570 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2573 netif_rx_schedule(dev, &bp->napi);
2579 bnx2_interrupt(int irq, void *dev_instance)
2581 struct net_device *dev = dev_instance;
2582 struct bnx2 *bp = netdev_priv(dev);
2583 struct status_block *sblk = bp->status_blk;
2585 /* When using INTx, it is possible for the interrupt to arrive
2586 * at the CPU before the status block posted prior to the
2587 * interrupt. Reading a register will flush the status block.
2588 * When using MSI, the MSI message will always complete after
2589 * the status block write.
2591 if ((sblk->status_idx == bp->last_status_idx) &&
2592 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
2593 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
2596 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2597 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2598 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2600 /* Read back to deassert IRQ immediately to avoid too many
2601 * spurious interrupts.
2603 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
2605 /* Return here if interrupt is shared and is disabled. */
2606 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2609 if (netif_rx_schedule_prep(dev, &bp->napi)) {
2610 bp->last_status_idx = sblk->status_idx;
2611 __netif_rx_schedule(dev, &bp->napi);
2617 #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
2618 STATUS_ATTN_BITS_TIMER_ABORT)
2621 bnx2_has_work(struct bnx2 *bp)
2623 struct status_block *sblk = bp->status_blk;
2625 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
2626 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2629 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2630 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
2637 bnx2_poll(struct napi_struct *napi, int budget)
2639 struct bnx2 *bp = container_of(napi, struct bnx2, napi);
2640 struct net_device *dev = bp->dev;
2641 struct status_block *sblk = bp->status_blk;
2642 u32 status_attn_bits = sblk->status_attn_bits;
2643 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
2646 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
2647 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
2651 /* This is needed to take care of transient status
2652 * during link changes.
2654 REG_WR(bp, BNX2_HC_COMMAND,
2655 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
2656 REG_RD(bp, BNX2_HC_COMMAND);
2659 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
2662 if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons)
2663 work_done = bnx2_rx_int(bp, budget);
2665 bp->last_status_idx = bp->status_blk->status_idx;
2668 if (!bnx2_has_work(bp)) {
2669 netif_rx_complete(dev, napi);
2670 if (likely(bp->flags & USING_MSI_FLAG)) {
2671 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2672 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2673 bp->last_status_idx);
2676 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2677 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2678 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
2679 bp->last_status_idx);
2681 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2682 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2683 bp->last_status_idx);
2689 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
2690 * from set_multicast.
2693 bnx2_set_rx_mode(struct net_device *dev)
2695 struct bnx2 *bp = netdev_priv(dev);
2696 u32 rx_mode, sort_mode;
2699 spin_lock_bh(&bp->phy_lock);
2701 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2702 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2703 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
2705 if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
2706 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2708 if (!(bp->flags & ASF_ENABLE_FLAG))
2709 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2711 if (dev->flags & IFF_PROMISC) {
2712 /* Promiscuous mode. */
2713 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
2714 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
2715 BNX2_RPM_SORT_USER0_PROM_VLAN;
2717 else if (dev->flags & IFF_ALLMULTI) {
2718 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2719 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2722 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
2725 /* Accept one or more multicast(s). */
2726 struct dev_mc_list *mclist;
2727 u32 mc_filter[NUM_MC_HASH_REGISTERS];
2732 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
2734 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2735 i++, mclist = mclist->next) {
2737 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
2739 regidx = (bit & 0xe0) >> 5;
2741 mc_filter[regidx] |= (1 << bit);
2744 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2745 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2749 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
2752 if (rx_mode != bp->rx_mode) {
2753 bp->rx_mode = rx_mode;
2754 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
2757 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2758 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2759 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
2761 spin_unlock_bh(&bp->phy_lock);
2765 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2772 for (i = 0; i < rv2p_code_len; i += 8) {
2773 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
2775 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
2778 if (rv2p_proc == RV2P_PROC1) {
2779 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2780 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2783 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2784 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2788 /* Reset the processor, un-stall is done later. */
2789 if (rv2p_proc == RV2P_PROC1) {
2790 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2793 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
2798 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2805 val = REG_RD_IND(bp, cpu_reg->mode);
2806 val |= cpu_reg->mode_value_halt;
2807 REG_WR_IND(bp, cpu_reg->mode, val);
2808 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2810 /* Load the Text area. */
2811 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2815 rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
2820 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2821 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
2825 /* Load the Data area. */
2826 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2830 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2831 REG_WR_IND(bp, offset, fw->data[j]);
2835 /* Load the SBSS area. */
2836 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2840 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2841 REG_WR_IND(bp, offset, 0);
2845 /* Load the BSS area. */
2846 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2850 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2851 REG_WR_IND(bp, offset, 0);
2855 /* Load the Read-Only area. */
2856 offset = cpu_reg->spad_base +
2857 (fw->rodata_addr - cpu_reg->mips_view_base);
2861 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2862 REG_WR_IND(bp, offset, fw->rodata[j]);
2866 /* Clear the pre-fetch instruction. */
2867 REG_WR_IND(bp, cpu_reg->inst, 0);
2868 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2870 /* Start the CPU. */
2871 val = REG_RD_IND(bp, cpu_reg->mode);
2872 val &= ~cpu_reg->mode_value_halt;
2873 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2874 REG_WR_IND(bp, cpu_reg->mode, val);
2880 bnx2_init_cpus(struct bnx2 *bp)
2882 struct cpu_reg cpu_reg;
2887 /* Initialize the RV2P processor. */
2888 text = vmalloc(FW_BUF_SIZE);
2891 rc = zlib_inflate_blob(text, FW_BUF_SIZE, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1));
2895 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);
2897 rc = zlib_inflate_blob(text, FW_BUF_SIZE, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2));
2901 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);
2903 /* Initialize the RX Processor. */
2904 cpu_reg.mode = BNX2_RXP_CPU_MODE;
2905 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
2906 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
2907 cpu_reg.state = BNX2_RXP_CPU_STATE;
2908 cpu_reg.state_value_clear = 0xffffff;
2909 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
2910 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
2911 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
2912 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
2913 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
2914 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
2915 cpu_reg.mips_view_base = 0x8000000;
2917 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2918 fw = &bnx2_rxp_fw_09;
2920 fw = &bnx2_rxp_fw_06;
2923 rc = load_cpu_fw(bp, &cpu_reg, fw);
2927 /* Initialize the TX Processor. */
2928 cpu_reg.mode = BNX2_TXP_CPU_MODE;
2929 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
2930 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
2931 cpu_reg.state = BNX2_TXP_CPU_STATE;
2932 cpu_reg.state_value_clear = 0xffffff;
2933 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
2934 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
2935 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
2936 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
2937 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
2938 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
2939 cpu_reg.mips_view_base = 0x8000000;
2941 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2942 fw = &bnx2_txp_fw_09;
2944 fw = &bnx2_txp_fw_06;
2947 rc = load_cpu_fw(bp, &cpu_reg, fw);
2951 /* Initialize the TX Patch-up Processor. */
2952 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
2953 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
2954 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
2955 cpu_reg.state = BNX2_TPAT_CPU_STATE;
2956 cpu_reg.state_value_clear = 0xffffff;
2957 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
2958 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
2959 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
2960 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
2961 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
2962 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
2963 cpu_reg.mips_view_base = 0x8000000;
2965 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2966 fw = &bnx2_tpat_fw_09;
2968 fw = &bnx2_tpat_fw_06;
2971 rc = load_cpu_fw(bp, &cpu_reg, fw);
2975 /* Initialize the Completion Processor. */
2976 cpu_reg.mode = BNX2_COM_CPU_MODE;
2977 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
2978 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
2979 cpu_reg.state = BNX2_COM_CPU_STATE;
2980 cpu_reg.state_value_clear = 0xffffff;
2981 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
2982 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
2983 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
2984 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
2985 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
2986 cpu_reg.spad_base = BNX2_COM_SCRATCH;
2987 cpu_reg.mips_view_base = 0x8000000;
2989 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2990 fw = &bnx2_com_fw_09;
2992 fw = &bnx2_com_fw_06;
2995 rc = load_cpu_fw(bp, &cpu_reg, fw);
2999 /* Initialize the Command Processor. */
3000 cpu_reg.mode = BNX2_CP_CPU_MODE;
3001 cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
3002 cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
3003 cpu_reg.state = BNX2_CP_CPU_STATE;
3004 cpu_reg.state_value_clear = 0xffffff;
3005 cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
3006 cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
3007 cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
3008 cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
3009 cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
3010 cpu_reg.spad_base = BNX2_CP_SCRATCH;
3011 cpu_reg.mips_view_base = 0x8000000;
3013 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3014 fw = &bnx2_cp_fw_09;
3017 rc = load_cpu_fw(bp, &cpu_reg, fw);
3027 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3031 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3037 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3038 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3039 PCI_PM_CTRL_PME_STATUS);
3041 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3042 /* delay required during transition out of D3hot */
3045 val = REG_RD(bp, BNX2_EMAC_MODE);
3046 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3047 val &= ~BNX2_EMAC_MODE_MPKT;
3048 REG_WR(bp, BNX2_EMAC_MODE, val);
3050 val = REG_RD(bp, BNX2_RPM_CONFIG);
3051 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3052 REG_WR(bp, BNX2_RPM_CONFIG, val);
3063 autoneg = bp->autoneg;
3064 advertising = bp->advertising;
3066 bp->autoneg = AUTONEG_SPEED;
3067 bp->advertising = ADVERTISED_10baseT_Half |
3068 ADVERTISED_10baseT_Full |
3069 ADVERTISED_100baseT_Half |
3070 ADVERTISED_100baseT_Full |
3073 bnx2_setup_copper_phy(bp);
3075 bp->autoneg = autoneg;
3076 bp->advertising = advertising;
3078 bnx2_set_mac_addr(bp);
3080 val = REG_RD(bp, BNX2_EMAC_MODE);
3082 /* Enable port mode. */
3083 val &= ~BNX2_EMAC_MODE_PORT;
3084 val |= BNX2_EMAC_MODE_PORT_MII |
3085 BNX2_EMAC_MODE_MPKT_RCVD |
3086 BNX2_EMAC_MODE_ACPI_RCVD |
3087 BNX2_EMAC_MODE_MPKT;
3089 REG_WR(bp, BNX2_EMAC_MODE, val);
3091 /* receive all multicast */
3092 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3093 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3096 REG_WR(bp, BNX2_EMAC_RX_MODE,
3097 BNX2_EMAC_RX_MODE_SORT_MODE);
3099 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3100 BNX2_RPM_SORT_USER0_MC_EN;
3101 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3102 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3103 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3104 BNX2_RPM_SORT_USER0_ENA);
3106 /* Need to enable EMAC and RPM for WOL. */
3107 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3108 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3109 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3110 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3112 val = REG_RD(bp, BNX2_RPM_CONFIG);
3113 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3114 REG_WR(bp, BNX2_RPM_CONFIG, val);
3116 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3119 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3122 if (!(bp->flags & NO_WOL_FLAG))
3123 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
3125 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3126 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3127 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3136 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3138 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3141 /* No more memory access after this point until
3142 * device is brought back to D0.
3154 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3159 /* Request access to the flash interface. */
3160 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3161 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3162 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3163 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3169 if (j >= NVRAM_TIMEOUT_COUNT)
3176 bnx2_release_nvram_lock(struct bnx2 *bp)
3181 /* Relinquish nvram interface. */
3182 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3184 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3185 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3186 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3192 if (j >= NVRAM_TIMEOUT_COUNT)
3200 bnx2_enable_nvram_write(struct bnx2 *bp)
3204 val = REG_RD(bp, BNX2_MISC_CFG);
3205 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3207 if (bp->flash_info->flags & BNX2_NV_WREN) {
3210 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3211 REG_WR(bp, BNX2_NVM_COMMAND,
3212 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3214 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3217 val = REG_RD(bp, BNX2_NVM_COMMAND);
3218 if (val & BNX2_NVM_COMMAND_DONE)
3222 if (j >= NVRAM_TIMEOUT_COUNT)
3229 bnx2_disable_nvram_write(struct bnx2 *bp)
3233 val = REG_RD(bp, BNX2_MISC_CFG);
3234 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3239 bnx2_enable_nvram_access(struct bnx2 *bp)
3243 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3244 /* Enable both bits, even on read. */
3245 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3246 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3250 bnx2_disable_nvram_access(struct bnx2 *bp)
3254 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3255 /* Disable both bits, even after read. */
3256 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3257 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3258 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3262 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3267 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
3268 /* Buffered flash, no erase needed */
3271 /* Build an erase command */
3272 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3273 BNX2_NVM_COMMAND_DOIT;
3275 /* Need to clear DONE bit separately. */
3276 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3278 /* Address of the NVRAM to read from. */
3279 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3281 /* Issue an erase command. */
3282 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3284 /* Wait for completion. */
3285 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3290 val = REG_RD(bp, BNX2_NVM_COMMAND);
3291 if (val & BNX2_NVM_COMMAND_DONE)
3295 if (j >= NVRAM_TIMEOUT_COUNT)
3302 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3307 /* Build the command word. */
3308 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3310 /* Calculate an offset of a buffered flash, not needed for 5709. */
3311 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3312 offset = ((offset / bp->flash_info->page_size) <<
3313 bp->flash_info->page_bits) +
3314 (offset % bp->flash_info->page_size);
3317 /* Need to clear DONE bit separately. */
3318 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3320 /* Address of the NVRAM to read from. */
3321 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3323 /* Issue a read command. */
3324 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3326 /* Wait for completion. */
3327 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3332 val = REG_RD(bp, BNX2_NVM_COMMAND);
3333 if (val & BNX2_NVM_COMMAND_DONE) {
3334 val = REG_RD(bp, BNX2_NVM_READ);
3336 val = be32_to_cpu(val);
3337 memcpy(ret_val, &val, 4);
3341 if (j >= NVRAM_TIMEOUT_COUNT)
3349 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3354 /* Build the command word. */
3355 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3357 /* Calculate an offset of a buffered flash, not needed for 5709. */
3358 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3359 offset = ((offset / bp->flash_info->page_size) <<
3360 bp->flash_info->page_bits) +
3361 (offset % bp->flash_info->page_size);
3364 /* Need to clear DONE bit separately. */
3365 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3367 memcpy(&val32, val, 4);
3368 val32 = cpu_to_be32(val32);
3370 /* Write the data. */
3371 REG_WR(bp, BNX2_NVM_WRITE, val32);
3373 /* Address of the NVRAM to write to. */
3374 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3376 /* Issue the write command. */
3377 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3379 /* Wait for completion. */
3380 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3383 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3386 if (j >= NVRAM_TIMEOUT_COUNT)
3393 bnx2_init_nvram(struct bnx2 *bp)
3396 int j, entry_count, rc = 0;
3397 struct flash_spec *flash;
3399 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3400 bp->flash_info = &flash_5709;
3401 goto get_flash_size;
3404 /* Determine the selected interface. */
3405 val = REG_RD(bp, BNX2_NVM_CFG1);
3407 entry_count = ARRAY_SIZE(flash_table);
3409 if (val & 0x40000000) {
3411 /* Flash interface has been reconfigured */
3412 for (j = 0, flash = &flash_table[0]; j < entry_count;
3414 if ((val & FLASH_BACKUP_STRAP_MASK) ==
3415 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
3416 bp->flash_info = flash;
3423 /* Not yet been reconfigured */
3425 if (val & (1 << 23))
3426 mask = FLASH_BACKUP_STRAP_MASK;
3428 mask = FLASH_STRAP_MASK;
3430 for (j = 0, flash = &flash_table[0]; j < entry_count;
3433 if ((val & mask) == (flash->strapping & mask)) {
3434 bp->flash_info = flash;
3436 /* Request access to the flash interface. */
3437 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3440 /* Enable access to flash interface */
3441 bnx2_enable_nvram_access(bp);
3443 /* Reconfigure the flash interface */
3444 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
3445 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
3446 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
3447 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
3449 /* Disable access to flash interface */
3450 bnx2_disable_nvram_access(bp);
3451 bnx2_release_nvram_lock(bp);
3456 } /* if (val & 0x40000000) */
3458 if (j == entry_count) {
3459 bp->flash_info = NULL;
3460 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
3465 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
3466 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
3468 bp->flash_size = val;
3470 bp->flash_size = bp->flash_info->total_size;
3476 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3480 u32 cmd_flags, offset32, len32, extra;
3485 /* Request access to the flash interface. */
3486 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3489 /* Enable access to flash interface */
3490 bnx2_enable_nvram_access(bp);
3503 pre_len = 4 - (offset & 3);
3505 if (pre_len >= len32) {
3507 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3508 BNX2_NVM_COMMAND_LAST;
3511 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3514 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3519 memcpy(ret_buf, buf + (offset & 3), pre_len);
3526 extra = 4 - (len32 & 3);
3527 len32 = (len32 + 4) & ~3;
3534 cmd_flags = BNX2_NVM_COMMAND_LAST;
3536 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3537 BNX2_NVM_COMMAND_LAST;
3539 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3541 memcpy(ret_buf, buf, 4 - extra);
3543 else if (len32 > 0) {
3546 /* Read the first word. */
3550 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3552 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3554 /* Advance to the next dword. */
3559 while (len32 > 4 && rc == 0) {
3560 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3562 /* Advance to the next dword. */
3571 cmd_flags = BNX2_NVM_COMMAND_LAST;
3572 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3574 memcpy(ret_buf, buf, 4 - extra);
3577 /* Disable access to flash interface */
3578 bnx2_disable_nvram_access(bp);
3580 bnx2_release_nvram_lock(bp);
3586 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3589 u32 written, offset32, len32;
3590 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
3592 int align_start, align_end;
3597 align_start = align_end = 0;
3599 if ((align_start = (offset32 & 3))) {
3601 len32 += align_start;
3604 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3609 align_end = 4 - (len32 & 3);
3611 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
3615 if (align_start || align_end) {
3616 align_buf = kmalloc(len32, GFP_KERNEL);
3617 if (align_buf == NULL)
3620 memcpy(align_buf, start, 4);
3623 memcpy(align_buf + len32 - 4, end, 4);
3625 memcpy(align_buf + align_start, data_buf, buf_size);
3629 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3630 flash_buffer = kmalloc(264, GFP_KERNEL);
3631 if (flash_buffer == NULL) {
3633 goto nvram_write_end;
3638 while ((written < len32) && (rc == 0)) {
3639 u32 page_start, page_end, data_start, data_end;
3640 u32 addr, cmd_flags;
3643 /* Find the page_start addr */
3644 page_start = offset32 + written;
3645 page_start -= (page_start % bp->flash_info->page_size);
3646 /* Find the page_end addr */
3647 page_end = page_start + bp->flash_info->page_size;
3648 /* Find the data_start addr */
3649 data_start = (written == 0) ? offset32 : page_start;
3650 /* Find the data_end addr */
3651 data_end = (page_end > offset32 + len32) ?
3652 (offset32 + len32) : page_end;
3654 /* Request access to the flash interface. */
3655 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3656 goto nvram_write_end;
3658 /* Enable access to flash interface */
3659 bnx2_enable_nvram_access(bp);
3661 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3662 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3665 /* Read the whole page into the buffer
3666 * (non-buffer flash only) */
3667 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3668 if (j == (bp->flash_info->page_size - 4)) {
3669 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3671 rc = bnx2_nvram_read_dword(bp,
3677 goto nvram_write_end;
3683 /* Enable writes to flash interface (unlock write-protect) */
3684 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3685 goto nvram_write_end;
3687 /* Loop to write back the buffer data from page_start to
3690 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3691 /* Erase the page */
3692 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3693 goto nvram_write_end;
3695 /* Re-enable the write again for the actual write */
3696 bnx2_enable_nvram_write(bp);
3698 for (addr = page_start; addr < data_start;
3699 addr += 4, i += 4) {
3701 rc = bnx2_nvram_write_dword(bp, addr,
3702 &flash_buffer[i], cmd_flags);
3705 goto nvram_write_end;
3711 /* Loop to write the new data from data_start to data_end */
3712 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
3713 if ((addr == page_end - 4) ||
3714 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
3715 (addr == data_end - 4))) {
3717 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3719 rc = bnx2_nvram_write_dword(bp, addr, buf,
3723 goto nvram_write_end;
3729 /* Loop to write back the buffer data from data_end
3731 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3732 for (addr = data_end; addr < page_end;
3733 addr += 4, i += 4) {
3735 if (addr == page_end-4) {
3736 cmd_flags = BNX2_NVM_COMMAND_LAST;
3738 rc = bnx2_nvram_write_dword(bp, addr,
3739 &flash_buffer[i], cmd_flags);
3742 goto nvram_write_end;
3748 /* Disable writes to flash interface (lock write-protect) */
3749 bnx2_disable_nvram_write(bp);
3751 /* Disable access to flash interface */
3752 bnx2_disable_nvram_access(bp);
3753 bnx2_release_nvram_lock(bp);
3755 /* Increment written */
3756 written += data_end - data_start;
3760 kfree(flash_buffer);
3766 bnx2_init_remote_phy(struct bnx2 *bp)
3770 bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG;
3771 if (!(bp->phy_flags & PHY_SERDES_FLAG))
3774 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB);
3775 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
3778 if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
3779 if (netif_running(bp->dev)) {
3780 val = BNX2_DRV_ACK_CAP_SIGNATURE |
3781 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
3782 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB,
3785 bp->phy_flags |= REMOTE_PHY_CAP_FLAG;
3787 val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
3788 if (val & BNX2_LINK_STATUS_SERDES_LINK)
3789 bp->phy_port = PORT_FIBRE;
3791 bp->phy_port = PORT_TP;
3796 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3801 /* Wait for the current PCI transaction to complete before
3802 * issuing a reset. */
3803 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3804 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3805 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3806 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3807 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3808 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3811 /* Wait for the firmware to tell us it is ok to issue a reset. */
3812 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3814 /* Deposit a driver reset signature so the firmware knows that
3815 * this is a soft reset. */
3816 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
3817 BNX2_DRV_RESET_SIGNATURE_MAGIC);
3819 /* Do a dummy read to force the chip to complete all current transaction
3820 * before we issue a reset. */
3821 val = REG_RD(bp, BNX2_MISC_ID);
3823 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3824 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
3825 REG_RD(bp, BNX2_MISC_COMMAND);
3828 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3829 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3831 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
3834 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3835 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3836 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3839 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3841 /* Reading back any register after chip reset will hang the
3842 * bus on 5706 A0 and A1. The msleep below provides plenty
3843 * of margin for write posting.
3845 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3846 (CHIP_ID(bp) == CHIP_ID_5706_A1))
3849 /* Reset takes approximate 30 usec */
3850 for (i = 0; i < 10; i++) {
3851 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3852 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3853 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3858 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3859 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3860 printk(KERN_ERR PFX "Chip reset did not complete\n");
3865 /* Make sure byte swapping is properly configured. */
3866 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3867 if (val != 0x01020304) {
3868 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3872 /* Wait for the firmware to finish its initialization. */
3873 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3877 spin_lock_bh(&bp->phy_lock);
3878 bnx2_init_remote_phy(bp);
3879 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
3880 bnx2_set_default_remote_link(bp);
3881 spin_unlock_bh(&bp->phy_lock);
3883 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3884 /* Adjust the voltage regular to two steps lower. The default
3885 * of this register is 0x0000000e. */
3886 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3888 /* Remove bad rbuf memory from the free pool. */
3889 rc = bnx2_alloc_bad_rbuf(bp);
3896 bnx2_init_chip(struct bnx2 *bp)
3901 /* Make sure the interrupt is not active. */
3902 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3904 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
3905 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
3907 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
3909 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
3910 DMA_READ_CHANS << 12 |
3911 DMA_WRITE_CHANS << 16;
3913 val |= (0x2 << 20) | (1 << 11);
3915 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
3918 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
3919 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
3920 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
3922 REG_WR(bp, BNX2_DMA_CONFIG, val);
3924 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3925 val = REG_RD(bp, BNX2_TDMA_CONFIG);
3926 val |= BNX2_TDMA_CONFIG_ONE_DMA;
3927 REG_WR(bp, BNX2_TDMA_CONFIG, val);
3930 if (bp->flags & PCIX_FLAG) {
3933 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3935 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3936 val16 & ~PCI_X_CMD_ERO);
3939 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3940 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3941 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3942 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3944 /* Initialize context mapping and zero out the quick contexts. The
3945 * context block must have already been enabled. */
3946 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3947 rc = bnx2_init_5709_context(bp);
3951 bnx2_init_context(bp);
3953 if ((rc = bnx2_init_cpus(bp)) != 0)
3956 bnx2_init_nvram(bp);
3958 bnx2_set_mac_addr(bp);
3960 val = REG_RD(bp, BNX2_MQ_CONFIG);
3961 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3962 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3963 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
3964 val |= BNX2_MQ_CONFIG_HALT_DIS;
3966 REG_WR(bp, BNX2_MQ_CONFIG, val);
3968 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3969 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
3970 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
3972 val = (BCM_PAGE_BITS - 8) << 24;
3973 REG_WR(bp, BNX2_RV2P_CONFIG, val);
3975 /* Configure page size. */
3976 val = REG_RD(bp, BNX2_TBDR_CONFIG);
3977 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
3978 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3979 REG_WR(bp, BNX2_TBDR_CONFIG, val);
3981 val = bp->mac_addr[0] +
3982 (bp->mac_addr[1] << 8) +
3983 (bp->mac_addr[2] << 16) +
3985 (bp->mac_addr[4] << 8) +
3986 (bp->mac_addr[5] << 16);
3987 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
3989 /* Program the MTU. Also include 4 bytes for CRC32. */
3990 val = bp->dev->mtu + ETH_HLEN + 4;
3991 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
3992 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
3993 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
3995 bp->last_status_idx = 0;
3996 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
3998 /* Set up how to generate a link change interrupt. */
3999 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4001 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4002 (u64) bp->status_blk_mapping & 0xffffffff);
4003 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4005 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4006 (u64) bp->stats_blk_mapping & 0xffffffff);
4007 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4008 (u64) bp->stats_blk_mapping >> 32);
4010 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4011 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4013 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4014 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4016 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4017 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4019 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4021 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4023 REG_WR(bp, BNX2_HC_COM_TICKS,
4024 (bp->com_ticks_int << 16) | bp->com_ticks);
4026 REG_WR(bp, BNX2_HC_CMD_TICKS,
4027 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4029 if (CHIP_NUM(bp) == CHIP_NUM_5708)
4030 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4032 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4033 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4035 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4036 val = BNX2_HC_CONFIG_COLLECT_STATS;
4038 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4039 BNX2_HC_CONFIG_COLLECT_STATS;
4042 if (bp->flags & ONE_SHOT_MSI_FLAG)
4043 val |= BNX2_HC_CONFIG_ONE_SHOT;
4045 REG_WR(bp, BNX2_HC_CONFIG, val);
4047 /* Clear internal stats counters. */
4048 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4050 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
4052 /* Initialize the receive filter. */
4053 bnx2_set_rx_mode(bp->dev);
4055 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4056 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4057 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4058 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4060 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
4063 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
4064 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
4068 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
4074 bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
4076 u32 val, offset0, offset1, offset2, offset3;
4078 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4079 offset0 = BNX2_L2CTX_TYPE_XI;
4080 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4081 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4082 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4084 offset0 = BNX2_L2CTX_TYPE;
4085 offset1 = BNX2_L2CTX_CMD_TYPE;
4086 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4087 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4089 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4090 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
4092 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4093 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
4095 val = (u64) bp->tx_desc_mapping >> 32;
4096 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
4098 val = (u64) bp->tx_desc_mapping & 0xffffffff;
4099 CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
4103 bnx2_init_tx_ring(struct bnx2 *bp)
4108 bp->tx_wake_thresh = bp->tx_ring_size / 2;
4110 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
4112 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
4113 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
4118 bp->tx_prod_bseq = 0;
4121 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4122 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4124 bnx2_init_tx_context(bp, cid);
4128 bnx2_init_rx_ring(struct bnx2 *bp)
4132 u16 prod, ring_prod;
4135 /* 8 for CRC and VLAN */
4136 bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
4138 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
4140 ring_prod = prod = bp->rx_prod = 0;
4143 bp->rx_prod_bseq = 0;
4145 for (i = 0; i < bp->rx_max_ring; i++) {
4148 rxbd = &bp->rx_desc_ring[i][0];
4149 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4150 rxbd->rx_bd_len = bp->rx_buf_use_size;
4151 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4153 if (i == (bp->rx_max_ring - 1))
4157 rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
4158 rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
4162 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
4163 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
4165 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);
4167 val = (u64) bp->rx_desc_mapping[0] >> 32;
4168 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);
4170 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
4171 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);
4173 for (i = 0; i < bp->rx_ring_size; i++) {
4174 if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
4177 prod = NEXT_RX_BD(prod);
4178 ring_prod = RX_RING_IDX(prod);
4182 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
4184 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
4188 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4192 bp->rx_ring_size = size;
4194 while (size > MAX_RX_DESC_CNT) {
4195 size -= MAX_RX_DESC_CNT;
4198 /* round to next power of 2 */
4200 while ((max & num_rings) == 0)
4203 if (num_rings != max)
4206 bp->rx_max_ring = max;
4207 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
4211 bnx2_free_tx_skbs(struct bnx2 *bp)
4215 if (bp->tx_buf_ring == NULL)
4218 for (i = 0; i < TX_DESC_CNT; ) {
4219 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
4220 struct sk_buff *skb = tx_buf->skb;
4228 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
4229 skb_headlen(skb), PCI_DMA_TODEVICE);
4233 last = skb_shinfo(skb)->nr_frags;
4234 for (j = 0; j < last; j++) {
4235 tx_buf = &bp->tx_buf_ring[i + j + 1];
4236 pci_unmap_page(bp->pdev,
4237 pci_unmap_addr(tx_buf, mapping),
4238 skb_shinfo(skb)->frags[j].size,
4248 bnx2_free_rx_skbs(struct bnx2 *bp)
4252 if (bp->rx_buf_ring == NULL)
4255 for (i = 0; i < bp->rx_max_ring_idx; i++) {
4256 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
4257 struct sk_buff *skb = rx_buf->skb;
4262 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4263 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4272 bnx2_free_skbs(struct bnx2 *bp)
4274 bnx2_free_tx_skbs(bp);
4275 bnx2_free_rx_skbs(bp);
4279 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4283 rc = bnx2_reset_chip(bp, reset_code);
4288 if ((rc = bnx2_init_chip(bp)) != 0)
4291 bnx2_init_tx_ring(bp);
4292 bnx2_init_rx_ring(bp);
4297 bnx2_init_nic(struct bnx2 *bp)
4301 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4304 spin_lock_bh(&bp->phy_lock);
4307 spin_unlock_bh(&bp->phy_lock);
4312 bnx2_test_registers(struct bnx2 *bp)
4316 static const struct {
4319 #define BNX2_FL_NOT_5709 1
4323 { 0x006c, 0, 0x00000000, 0x0000003f },
4324 { 0x0090, 0, 0xffffffff, 0x00000000 },
4325 { 0x0094, 0, 0x00000000, 0x00000000 },
4327 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
4328 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4329 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4330 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
4331 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
4332 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4333 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
4334 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4335 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4337 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4338 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4339 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4340 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4341 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4342 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4344 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4345 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
4346 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
4348 { 0x1000, 0, 0x00000000, 0x00000001 },
4349 { 0x1004, 0, 0x00000000, 0x000f0001 },
4351 { 0x1408, 0, 0x01c00800, 0x00000000 },
4352 { 0x149c, 0, 0x8000ffff, 0x00000000 },
4353 { 0x14a8, 0, 0x00000000, 0x000001ff },
4354 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
4355 { 0x14b0, 0, 0x00000002, 0x00000001 },
4356 { 0x14b8, 0, 0x00000000, 0x00000000 },
4357 { 0x14c0, 0, 0x00000000, 0x00000009 },
4358 { 0x14c4, 0, 0x00003fff, 0x00000000 },
4359 { 0x14cc, 0, 0x00000000, 0x00000001 },
4360 { 0x14d0, 0, 0xffffffff, 0x00000000 },
4362 { 0x1800, 0, 0x00000000, 0x00000001 },
4363 { 0x1804, 0, 0x00000000, 0x00000003 },
4365 { 0x2800, 0, 0x00000000, 0x00000001 },
4366 { 0x2804, 0, 0x00000000, 0x00003f01 },
4367 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
4368 { 0x2810, 0, 0xffff0000, 0x00000000 },
4369 { 0x2814, 0, 0xffff0000, 0x00000000 },
4370 { 0x2818, 0, 0xffff0000, 0x00000000 },
4371 { 0x281c, 0, 0xffff0000, 0x00000000 },
4372 { 0x2834, 0, 0xffffffff, 0x00000000 },
4373 { 0x2840, 0, 0x00000000, 0xffffffff },
4374 { 0x2844, 0, 0x00000000, 0xffffffff },
4375 { 0x2848, 0, 0xffffffff, 0x00000000 },
4376 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
4378 { 0x2c00, 0, 0x00000000, 0x00000011 },
4379 { 0x2c04, 0, 0x00000000, 0x00030007 },
4381 { 0x3c00, 0, 0x00000000, 0x00000001 },
4382 { 0x3c04, 0, 0x00000000, 0x00070000 },
4383 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
4384 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
4385 { 0x3c10, 0, 0xffffffff, 0x00000000 },
4386 { 0x3c14, 0, 0x00000000, 0xffffffff },
4387 { 0x3c18, 0, 0x00000000, 0xffffffff },
4388 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
4389 { 0x3c20, 0, 0xffffff00, 0x00000000 },
4391 { 0x5004, 0, 0x00000000, 0x0000007f },
4392 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
4394 { 0x5c00, 0, 0x00000000, 0x00000001 },
4395 { 0x5c04, 0, 0x00000000, 0x0003000f },
4396 { 0x5c08, 0, 0x00000003, 0x00000000 },
4397 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
4398 { 0x5c10, 0, 0x00000000, 0xffffffff },
4399 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
4400 { 0x5c84, 0, 0x00000000, 0x0000f333 },
4401 { 0x5c88, 0, 0x00000000, 0x00077373 },
4402 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
4404 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
4405 { 0x680c, 0, 0xffffffff, 0x00000000 },
4406 { 0x6810, 0, 0xffffffff, 0x00000000 },
4407 { 0x6814, 0, 0xffffffff, 0x00000000 },
4408 { 0x6818, 0, 0xffffffff, 0x00000000 },
4409 { 0x681c, 0, 0xffffffff, 0x00000000 },
4410 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
4411 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
4412 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
4413 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
4414 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
4415 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
4416 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
4417 { 0x683c, 0, 0x0000ffff, 0x00000000 },
4418 { 0x6840, 0, 0x00000ff0, 0x00000000 },
4419 { 0x6844, 0, 0x00ffff00, 0x00000000 },
4420 { 0x684c, 0, 0xffffffff, 0x00000000 },
4421 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
4422 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
4423 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
4424 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
4425 { 0x6908, 0, 0x00000000, 0x0001ff0f },
4426 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
4428 { 0xffff, 0, 0x00000000, 0x00000000 },
4433 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4436 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
4437 u32 offset, rw_mask, ro_mask, save_val, val;
4438 u16 flags = reg_tbl[i].flags;
4440 if (is_5709 && (flags & BNX2_FL_NOT_5709))
4443 offset = (u32) reg_tbl[i].offset;
4444 rw_mask = reg_tbl[i].rw_mask;
4445 ro_mask = reg_tbl[i].ro_mask;
4447 save_val = readl(bp->regview + offset);
4449 writel(0, bp->regview + offset);
4451 val = readl(bp->regview + offset);
4452 if ((val & rw_mask) != 0) {
4456 if ((val & ro_mask) != (save_val & ro_mask)) {
4460 writel(0xffffffff, bp->regview + offset);
4462 val = readl(bp->regview + offset);
4463 if ((val & rw_mask) != rw_mask) {
4467 if ((val & ro_mask) != (save_val & ro_mask)) {
4471 writel(save_val, bp->regview + offset);
4475 writel(save_val, bp->regview + offset);
4483 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4485 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
4486 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4489 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4492 for (offset = 0; offset < size; offset += 4) {
4494 REG_WR_IND(bp, start + offset, test_pattern[i]);
4496 if (REG_RD_IND(bp, start + offset) !=
4506 bnx2_test_memory(struct bnx2 *bp)
4510 static struct mem_entry {
4513 } mem_tbl_5706[] = {
4514 { 0x60000, 0x4000 },
4515 { 0xa0000, 0x3000 },
4516 { 0xe0000, 0x4000 },
4517 { 0x120000, 0x4000 },
4518 { 0x1a0000, 0x4000 },
4519 { 0x160000, 0x4000 },
4523 { 0x60000, 0x4000 },
4524 { 0xa0000, 0x3000 },
4525 { 0xe0000, 0x4000 },
4526 { 0x120000, 0x4000 },
4527 { 0x1a0000, 0x4000 },
4530 struct mem_entry *mem_tbl;
4532 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4533 mem_tbl = mem_tbl_5709;
4535 mem_tbl = mem_tbl_5706;
4537 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4538 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4539 mem_tbl[i].len)) != 0) {
4547 #define BNX2_MAC_LOOPBACK 0
4548 #define BNX2_PHY_LOOPBACK 1
4551 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
4553 unsigned int pkt_size, num_pkts, i;
4554 struct sk_buff *skb, *rx_skb;
4555 unsigned char *packet;
4556 u16 rx_start_idx, rx_idx;
4559 struct sw_bd *rx_buf;
4560 struct l2_fhdr *rx_hdr;
4563 if (loopback_mode == BNX2_MAC_LOOPBACK) {
4564 bp->loopback = MAC_LOOPBACK;
4565 bnx2_set_mac_loopback(bp);
4567 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
4568 bp->loopback = PHY_LOOPBACK;
4569 bnx2_set_phy_loopback(bp);
4575 skb = netdev_alloc_skb(bp->dev, pkt_size);
4578 packet = skb_put(skb, pkt_size);
4579 memcpy(packet, bp->dev->dev_addr, 6);
4580 memset(packet + 6, 0x0, 8);
4581 for (i = 14; i < pkt_size; i++)
4582 packet[i] = (unsigned char) (i & 0xff);
4584 map = pci_map_single(bp->pdev, skb->data, pkt_size,
4587 REG_WR(bp, BNX2_HC_COMMAND,
4588 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4590 REG_RD(bp, BNX2_HC_COMMAND);
4593 rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
4597 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
4599 txbd->tx_bd_haddr_hi = (u64) map >> 32;
4600 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
4601 txbd->tx_bd_mss_nbytes = pkt_size;
4602 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
4605 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
4606 bp->tx_prod_bseq += pkt_size;
4608 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
4609 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
4613 REG_WR(bp, BNX2_HC_COMMAND,
4614 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4616 REG_RD(bp, BNX2_HC_COMMAND);
4620 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
4623 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
4624 goto loopback_test_done;
4627 rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
4628 if (rx_idx != rx_start_idx + num_pkts) {
4629 goto loopback_test_done;
4632 rx_buf = &bp->rx_buf_ring[rx_start_idx];
4633 rx_skb = rx_buf->skb;
4635 rx_hdr = (struct l2_fhdr *) rx_skb->data;
4636 skb_reserve(rx_skb, bp->rx_offset);
4638 pci_dma_sync_single_for_cpu(bp->pdev,
4639 pci_unmap_addr(rx_buf, mapping),
4640 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4642 if (rx_hdr->l2_fhdr_status &
4643 (L2_FHDR_ERRORS_BAD_CRC |
4644 L2_FHDR_ERRORS_PHY_DECODE |
4645 L2_FHDR_ERRORS_ALIGNMENT |
4646 L2_FHDR_ERRORS_TOO_SHORT |
4647 L2_FHDR_ERRORS_GIANT_FRAME)) {
4649 goto loopback_test_done;
4652 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
4653 goto loopback_test_done;
4656 for (i = 14; i < pkt_size; i++) {
4657 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
4658 goto loopback_test_done;
4669 #define BNX2_MAC_LOOPBACK_FAILED 1
4670 #define BNX2_PHY_LOOPBACK_FAILED 2
4671 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4672 BNX2_PHY_LOOPBACK_FAILED)
4675 bnx2_test_loopback(struct bnx2 *bp)
4679 if (!netif_running(bp->dev))
4680 return BNX2_LOOPBACK_FAILED;
4682 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4683 spin_lock_bh(&bp->phy_lock);
4685 spin_unlock_bh(&bp->phy_lock);
4686 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4687 rc |= BNX2_MAC_LOOPBACK_FAILED;
4688 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4689 rc |= BNX2_PHY_LOOPBACK_FAILED;
4693 #define NVRAM_SIZE 0x200
4694 #define CRC32_RESIDUAL 0xdebb20e3
4697 bnx2_test_nvram(struct bnx2 *bp)
4699 u32 buf[NVRAM_SIZE / 4];
4700 u8 *data = (u8 *) buf;
4704 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4705 goto test_nvram_done;
4707 magic = be32_to_cpu(buf[0]);
4708 if (magic != 0x669955aa) {
4710 goto test_nvram_done;
4713 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4714 goto test_nvram_done;
4716 csum = ether_crc_le(0x100, data);
4717 if (csum != CRC32_RESIDUAL) {
4719 goto test_nvram_done;
4722 csum = ether_crc_le(0x100, data + 0x100);
4723 if (csum != CRC32_RESIDUAL) {
4732 bnx2_test_link(struct bnx2 *bp)
4736 spin_lock_bh(&bp->phy_lock);
4737 bnx2_enable_bmsr1(bp);
4738 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4739 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4740 bnx2_disable_bmsr1(bp);
4741 spin_unlock_bh(&bp->phy_lock);
4743 if (bmsr & BMSR_LSTATUS) {
4750 bnx2_test_intr(struct bnx2 *bp)
4755 if (!netif_running(bp->dev))
4758 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4760 /* This register is not touched during run-time. */
4761 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
4762 REG_RD(bp, BNX2_HC_COMMAND);
4764 for (i = 0; i < 10; i++) {
4765 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4771 msleep_interruptible(10);
4780 bnx2_5706_serdes_timer(struct bnx2 *bp)
4782 spin_lock(&bp->phy_lock);
4783 if (bp->serdes_an_pending)
4784 bp->serdes_an_pending--;
4785 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4788 bp->current_interval = bp->timer_interval;
4790 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
4792 if (bmcr & BMCR_ANENABLE) {
4795 bnx2_write_phy(bp, 0x1c, 0x7c00);
4796 bnx2_read_phy(bp, 0x1c, &phy1);
4798 bnx2_write_phy(bp, 0x17, 0x0f01);
4799 bnx2_read_phy(bp, 0x15, &phy2);
4800 bnx2_write_phy(bp, 0x17, 0x0f01);
4801 bnx2_read_phy(bp, 0x15, &phy2);
4803 if ((phy1 & 0x10) && /* SIGNAL DETECT */
4804 !(phy2 & 0x20)) { /* no CONFIG */
4806 bmcr &= ~BMCR_ANENABLE;
4807 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4808 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
4809 bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
4813 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
4814 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
4817 bnx2_write_phy(bp, 0x17, 0x0f01);
4818 bnx2_read_phy(bp, 0x15, &phy2);
4822 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
4823 bmcr |= BMCR_ANENABLE;
4824 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
4826 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
4829 bp->current_interval = bp->timer_interval;
4831 spin_unlock(&bp->phy_lock);
4835 bnx2_5708_serdes_timer(struct bnx2 *bp)
4837 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
4840 if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
4841 bp->serdes_an_pending = 0;
4845 spin_lock(&bp->phy_lock);
4846 if (bp->serdes_an_pending)
4847 bp->serdes_an_pending--;
4848 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4851 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
4852 if (bmcr & BMCR_ANENABLE) {
4853 bnx2_enable_forced_2g5(bp);
4854 bp->current_interval = SERDES_FORCED_TIMEOUT;
4856 bnx2_disable_forced_2g5(bp);
4857 bp->serdes_an_pending = 2;
4858 bp->current_interval = bp->timer_interval;
4862 bp->current_interval = bp->timer_interval;
4864 spin_unlock(&bp->phy_lock);
4868 bnx2_timer(unsigned long data)
4870 struct bnx2 *bp = (struct bnx2 *) data;
4872 if (!netif_running(bp->dev))
4875 if (atomic_read(&bp->intr_sem) != 0)
4876 goto bnx2_restart_timer;
4878 bnx2_send_heart_beat(bp);
4880 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
4882 /* workaround occasional corrupted counters */
4883 if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
4884 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
4885 BNX2_HC_COMMAND_STATS_NOW);
4887 if (bp->phy_flags & PHY_SERDES_FLAG) {
4888 if (CHIP_NUM(bp) == CHIP_NUM_5706)
4889 bnx2_5706_serdes_timer(bp);
4891 bnx2_5708_serdes_timer(bp);
4895 mod_timer(&bp->timer, jiffies + bp->current_interval);
4899 bnx2_request_irq(struct bnx2 *bp)
4901 struct net_device *dev = bp->dev;
4904 if (bp->flags & USING_MSI_FLAG) {
4905 irq_handler_t fn = bnx2_msi;
4907 if (bp->flags & ONE_SHOT_MSI_FLAG)
4908 fn = bnx2_msi_1shot;
4910 rc = request_irq(bp->pdev->irq, fn, 0, dev->name, dev);
4912 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4913 IRQF_SHARED, dev->name, dev);
4918 bnx2_free_irq(struct bnx2 *bp)
4920 struct net_device *dev = bp->dev;
4922 if (bp->flags & USING_MSI_FLAG) {
4923 free_irq(bp->pdev->irq, dev);
4924 pci_disable_msi(bp->pdev);
4925 bp->flags &= ~(USING_MSI_FLAG | ONE_SHOT_MSI_FLAG);
4927 free_irq(bp->pdev->irq, dev);
/* Called with rtnl_lock */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	netif_carrier_off(dev);

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	rc = bnx2_alloc_mem(bp);
	if (rc)
		return rc;

	napi_enable(&bp->napi);

	if ((bp->flags & MSI_CAP_FLAG) && !disable_msi) {
		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= USING_MSI_FLAG;
			if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bp->flags |= ONE_SHOT_MSI_FLAG;
		}
	}
	rc = bnx2_request_irq(bp);
	if (rc) {
		napi_disable(&bp->napi);
		bnx2_free_mem(bp);
		return rc;
	}

	rc = bnx2_init_nic(bp);
	if (rc) {
		napi_disable(&bp->napi);
		bnx2_free_irq(bp);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);
	bnx2_enable_int(bp);

	if (bp->flags & USING_MSI_FLAG) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			bnx2_disable_int(bp);
			bnx2_free_irq(bp);

			rc = bnx2_init_nic(bp);
			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				napi_disable(&bp->napi);
				bnx2_free_skbs(bp);
				bnx2_free_mem(bp);
				del_timer_sync(&bp->timer);
				return rc;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & USING_MSI_FLAG)
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);

	netif_start_queue(dev);

	return 0;
}
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

	if (!netif_running(bp->dev))
		return;

	bp->in_reset_task = 1;
	bnx2_netif_stop(bp);

	bnx2_init_nic(bp);

	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
	bp->in_reset_task = 0;
}
static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
/* Called with rtnl_lock */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
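/* TX path: the skb head gets a start BD and each page fragment gets its
 * own BD; the producer index and running byte count are then written to
 * the doorbell registers to hand the chain to the chip.
 */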
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;

	if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
		netif_stop_queue(dev);
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
			dev->name);

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = bp->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
	if ((mss = skb_shinfo(skb)->gso_size)) {
		u32 tcp_opt_len, ip_tcp_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			if (skb_header_cloned(skb) &&
			    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
				dev_kfree_skb(skb);
				return NETDEV_TX_OK;
			}

			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			iph = ip_hdr(skb);
			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tx_buf = &bp->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	pci_unmap_addr_set(tx_buf, mapping, mapping);

	txbd = &bp->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;

	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &bp->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
			len, PCI_DMA_TODEVICE);
		pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
				mapping, mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
	}
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	bp->tx_prod_bseq += skb->len;

	REG_WR16(bp, bp->tx_bidx_addr, prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	mmiowb();

	bp->tx_prod = prod;
	dev->trans_start = jiffies;

	/* Stop the queue when the ring is nearly full; bnx2_tx_int() may
	 * have freed descriptors in the meantime, so re-check and wake.
	 */
	if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
		netif_stop_queue(dev);
		if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;
}
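/* Teardown order: wait out any in-progress reset task, quiesce
 * interrupts, NAPI and the timer, then pick a reset code that tells the
 * firmware whether wake-on-LAN should stay armed.
 */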
/* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (bp->in_reset_task)
		msleep(1);

	bnx2_disable_int_sync(bp);
	napi_disable(&bp->napi);
	del_timer_sync(&bp->timer);
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
#define GET_NET_STATS64(ctr)					\
	(unsigned long) ((unsigned long) (ctr##_hi) << 32) +	\
	(unsigned long) (ctr##_lo)

#define GET_NET_STATS32(ctr)	\
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS	GET_NET_STATS64
#else
#define GET_NET_STATS	GET_NET_STATS32
#endif
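/* Hardware counters are kept as two 32-bit words (ctr_hi/ctr_lo).  On
 * 64-bit hosts both halves are combined; on 32-bit hosts only the low
 * word is reported since unsigned long cannot hold the full value.
 */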
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &bp->net_stats;

	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}
/* All ethtool functions called with rtnl_lock */

static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	int support_serdes = 0, support_copper = 0;

	cmd->supported = SUPPORTED_Autoneg;
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
		support_serdes = 1;
		support_copper = 1;
	} else if (bp->phy_port == PORT_FIBRE)
		support_serdes = 1;
	else
		support_copper = 1;

	if (support_serdes) {
		cmd->supported |= SUPPORTED_1000baseT_Full |
			SUPPORTED_FIBRE;
		if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
			cmd->supported |= SUPPORTED_2500baseX_Full;
	}
	if (support_copper) {
		cmd->supported |= SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Full |
			SUPPORTED_TP;
	}

	spin_lock_bh(&bp->phy_lock);
	cmd->port = bp->phy_port;
	cmd->advertising = bp->advertising;

	if (bp->autoneg & AUTONEG_SPEED) {
		cmd->autoneg = AUTONEG_ENABLE;
	} else {
		cmd->autoneg = AUTONEG_DISABLE;
	}

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->line_speed;
		cmd->duplex = bp->duplex;
	} else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}
	spin_unlock_bh(&bp->phy_lock);

	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = bp->phy_addr;

	return 0;
}
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
		goto err_out_unlock;

	if (cmd->port != bp->phy_port && !(bp->phy_flags & REMOTE_PHY_CAP_FLAG))
		goto err_out_unlock;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
		    (cmd->advertising == ADVERTISED_10baseT_Full) ||
		    (cmd->advertising == ADVERTISED_100baseT_Half) ||
		    (cmd->advertising == ADVERTISED_100baseT_Full)) {

			if (cmd->port == PORT_FIBRE)
				goto err_out_unlock;

			advertising = cmd->advertising;

		} else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
			if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ||
			    (cmd->port == PORT_TP))
				goto err_out_unlock;
		} else if (cmd->advertising == ADVERTISED_1000baseT_Full)
			advertising = cmd->advertising;
		else if (cmd->advertising == ADVERTISED_1000baseT_Half)
			goto err_out_unlock;
		else {
			if (cmd->port == PORT_FIBRE)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			else
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	} else {
		if (cmd->port == PORT_FIBRE) {
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
				goto err_out_unlock;
		} else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
static void
bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct bnx2 *bp = netdev_priv(dev);

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
	strcpy(info->bus_info, pci_name(bp->pdev));
	strcpy(info->fw_version, bp->fw_version);
}
#define BNX2_REGDUMP_LEN		(32 * 1024)

static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}

static void
bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, offset;
	u8 *orig_p = _p;
	struct bnx2 *bp = netdev_priv(dev);
	u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
				 0x0800, 0x0880, 0x0c00, 0x0c10,
				 0x0c30, 0x0d08, 0x1000, 0x101c,
				 0x1040, 0x1048, 0x1080, 0x10a4,
				 0x1400, 0x1490, 0x1498, 0x14f0,
				 0x1500, 0x155c, 0x1580, 0x15dc,
				 0x1600, 0x1658, 0x1680, 0x16d8,
				 0x1800, 0x1820, 0x1840, 0x1854,
				 0x1880, 0x1894, 0x1900, 0x1984,
				 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
				 0x1c80, 0x1c94, 0x1d00, 0x1d84,
				 0x2000, 0x2030, 0x23c0, 0x2400,
				 0x2800, 0x2820, 0x2830, 0x2850,
				 0x2b40, 0x2c10, 0x2fc0, 0x3058,
				 0x3c00, 0x3c94, 0x4000, 0x4010,
				 0x4080, 0x4090, 0x43c0, 0x4458,
				 0x4c00, 0x4c18, 0x4c40, 0x4c54,
				 0x4fc0, 0x5010, 0x53c0, 0x5444,
				 0x5c00, 0x5c18, 0x5c80, 0x5c90,
				 0x5fc0, 0x6000, 0x6400, 0x6428,
				 0x6800, 0x6848, 0x684c, 0x6860,
				 0x6888, 0x6910, 0x8000 };
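
	/* reg_boundaries[] lists (start, end) pairs of readable register
	 * windows; the loop below dumps each window and skips the holes in
	 * between, which stay zero from the memset().
	 */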
	regs->version = 0;

	memset(p, 0, BNX2_REGDUMP_LEN);

	if (!netif_running(bp->dev))
		return;

	i = 0;
	offset = reg_boundaries[0];
	while (offset < BNX2_REGDUMP_LEN) {
		*p++ = REG_RD(bp, offset);
		offset += 4;
		if (offset == reg_boundaries[i + 1]) {
			offset = reg_boundaries[i + 2];
			p = (u32 *) (orig_p + offset);
			i += 2;
		}
	}
}
static void
bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (bp->flags & NO_WOL_FLAG) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		wol->supported = WAKE_MAGIC;
		wol->wolopts = bp->wol ? WAKE_MAGIC : 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}
static int
bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & NO_WOL_FLAG)
			return -EINVAL;
		bp->wol = 1;
	} else
		bp->wol = 0;
	return 0;
}
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
static int
bnx2_get_eeprom_len(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (bp->flash_info == NULL)
		return 0;

	return (int) bp->flash_size;
}

static int
bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		u8 *eebuf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	/* parameters already validated in ethtool_get_eeprom */
	rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int
bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		u8 *eebuf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	/* parameters already validated in ethtool_set_eeprom */
	rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}
static int
bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;

	coal->tx_coalesce_usecs = bp->tx_ticks;
	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;

	coal->stats_block_coalesce_usecs = bp->stats_ticks;

	return 0;
}
static int
bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;

	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;

	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;

	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
	if (bp->rx_quick_cons_trip_int > 0xff)
		bp->rx_quick_cons_trip_int = 0xff;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;

	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;

	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;

	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
		0xff;

	bp->stats_ticks = coal->stats_block_coalesce_usecs;
	if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
			bp->stats_ticks = USEC_PER_SEC;
	}
	if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
		bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
	bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;

	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
static void
bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_DESC_CNT;
	ering->tx_pending = bp->tx_ring_size;
}
static int
bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
	    (ering->tx_pending > MAX_TX_DESC_CNT) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS)) {

		return -EINVAL;
	}
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, ering->rx_pending);
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		if (rc)
			return rc;
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
static void
bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct bnx2 *bp = netdev_priv(dev);

	epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
	epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
	epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
}
static int
bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->req_flow_ctrl = 0;
	if (epause->rx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_RX;
	if (epause->tx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_TX;

	if (epause->autoneg) {
		bp->autoneg |= AUTONEG_FLOW_CTRL;
	} else {
		bp->autoneg &= ~AUTONEG_FLOW_CTRL;
	}

	spin_lock_bh(&bp->phy_lock);

	bnx2_setup_phy(bp, bp->phy_port);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
static u32
bnx2_get_rx_csum(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int
bnx2_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_csum = data;
	return 0;
}

static int
bnx2_set_tso(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (data) {
		dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			dev->features |= NETIF_F_TSO6;
	} else
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
				   NETIF_F_TSO_ECN);
	return 0;
}
#define BNX2_NUM_STATS 46

static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
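/* Offsets below are in 32-bit words within struct statistics_block.
 * 64-bit counters occupy two consecutive words, high word first (see
 * bnx2_get_ethtool_stats()).
 */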
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
	STATS_OFFSET32(stat_IfHCInOctets_hi),
	STATS_OFFSET32(stat_IfHCInBadOctets_hi),
	STATS_OFFSET32(stat_IfHCOutOctets_hi),
	STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
	STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
	STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
	STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
	STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
	STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
	STATS_OFFSET32(stat_Dot3StatsFCSErrors),
	STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
	STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
	STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
	STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
	STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
	STATS_OFFSET32(stat_Dot3StatsLateCollisions),
	STATS_OFFSET32(stat_EtherStatsCollisions),
	STATS_OFFSET32(stat_EtherStatsFragments),
	STATS_OFFSET32(stat_EtherStatsJabbers),
	STATS_OFFSET32(stat_EtherStatsUndersizePkts),
	STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
	STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
	STATS_OFFSET32(stat_XonPauseFramesReceived),
	STATS_OFFSET32(stat_XoffPauseFramesReceived),
	STATS_OFFSET32(stat_OutXonSent),
	STATS_OFFSET32(stat_OutXoffSent),
	STATS_OFFSET32(stat_MacControlFramesReceived),
	STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
	STATS_OFFSET32(stat_IfInMBUFDiscards),
	STATS_OFFSET32(stat_FwRxDrop),
};
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};

/* stat_IfHCInBadOctets is skipped because of errata. */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
#define BNX2_NUM_TESTS 6

static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};

static int
bnx2_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return BNX2_NUM_TESTS;
	case ETH_SS_STATS:
		return BNX2_NUM_STATS;
	default:
		return -EOPNOTSUPP;
	}
}
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		} else {
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
}
static void
bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, bnx2_stats_str_arr,
			sizeof(bnx2_stats_str_arr));
		break;
	case ETH_SS_TEST:
		memcpy(buf, bnx2_tests_str_arr,
			sizeof(bnx2_tests_str_arr));
		break;
	}
}
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u8 *stats_len_arr = NULL;

	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64)
				*(hw_stats + bnx2_stats_offset_arr[i]);
			continue;
		}
		/* 8-byte counter */
		buf[i] = (((u64) *(hw_stats +
				   bnx2_stats_offset_arr[i])) << 32) +
			 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
	}
}
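/* Blink the port LED for identification: 'data' seconds of on/off at
 * 500 ms per half-cycle, restoring the original LED mode afterwards.
 */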
static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 save;

	if (data == 0)
		data = 2;

	save = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0) {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		} else {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
				BNX2_EMAC_LED_1000MB_OVERRIDE |
				BNX2_EMAC_LED_100MB_OVERRIDE |
				BNX2_EMAC_LED_10MB_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC);
		}
		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}
	REG_WR(bp, BNX2_EMAC_LED, 0);
	REG_WR(bp, BNX2_MISC_CFG, save);
	return 0;
}
static int
bnx2_set_tx_csum(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		return (ethtool_op_set_tx_ipv6_csum(dev, data));
	else
		return (ethtool_op_set_tx_csum(dev, data));
}
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.set_tx_csum		= bnx2_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= bnx2_set_tso,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_sset_count		= bnx2_get_sset_count,
};
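/* These ops back the userland ethtool utility; for example, ethtool -i
 * maps to get_drvinfo, -S to get_strings/get_ethtool_stats, -t to
 * self_test, -r to nway_reset and -p to phys_id.
 */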
/* Called with rtnl_lock */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
/* Called with rtnl_lock */
static int
bnx2_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2 *bp = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev))
		bnx2_set_mac_addr(bp);

	return 0;
}
/* Called with rtnl_lock */
static int
bnx2_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
		return -EINVAL;

	dev->mtu = new_mtu;
	if (netif_running(dev)) {
		bnx2_netif_stop(bp);

		bnx2_init_nic(bp);

		bnx2_netif_start(bp);
	}
	return 0;
}
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
static void __devinit
bnx2_get_5709_media(struct bnx2 *bp)
{
	u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
	u32 strap;

	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
		return;
	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
		bp->phy_flags |= PHY_SERDES_FLAG;
		return;
	}

	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
	else
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

	if (PCI_FUNC(bp->pdev->devfn) == 0) {
		switch (strap) {
		case 0x4:
		case 0x5:
		case 0x6:
			bp->phy_flags |= PHY_SERDES_FLAG;
			return;
		}
	} else {
		switch (strap) {
		case 0x1:
		case 0x2:
		case 0x4:
			bp->phy_flags |= PHY_SERDES_FLAG;
			return;
		}
	}
}
static void __devinit
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= PCIX_FLAG;

		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	} else {
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= PCI_32BIT_FLAG;
}
static int __devinit
bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
{
	struct bnx2 *bp;
	unsigned long mem_len;
	int rc, i, j;
	u32 reg;
	u64 dma_mask, persist_dma_mask;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->flags = 0;
	bp->phy_flags = 0;

	/* enable device (incl. PCI PM wakeup), and bus-mastering */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev,
			"Cannot find PCI device base address, aborting.\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (rc) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
		goto err_out_disable;
	}

	pci_set_master(pdev);

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find power management capability, aborting.\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->dev = dev;
	bp->pdev = pdev;

	spin_lock_init(&bp->phy_lock);
	spin_lock_init(&bp->indirect_lock);
	INIT_WORK(&bp->reset_task, bnx2_reset_task);

	dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
	mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
	dev->mem_end = dev->mem_start + mem_len;
	dev->irq = pdev->irq;

	bp->regview = ioremap_nocache(dev->base_addr, mem_len);
	if (!bp->regview) {
		dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
		rc = -ENOMEM;
		goto err_out_release;
	}
	/* Configure byte swap and enable write to the reg_window registers.
	 * Rely on CPU to do target byte swapping on big endian systems
	 * The chip's target access swapping will not swap all accesses
	 */
	pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
			       BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
			       BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);

	bnx2_set_power_state(bp, PCI_D0);

	bp->chip_id = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
			dev_err(&pdev->dev,
				"Cannot find PCIE capability, aborting.\n");
			rc = -EIO;
			goto err_out_unmap;
		}
		bp->flags |= PCIE_FLAG;
	} else {
		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
		if (bp->pcix_cap == 0) {
			dev_err(&pdev->dev,
				"Cannot find PCIX capability, aborting.\n");
			rc = -EIO;
			goto err_out_unmap;
		}
	}

	if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
		if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
			bp->flags |= MSI_CAP_FLAG;
	}
	/* 5708 cannot support DMA addresses > 40-bit.  */
	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		persist_dma_mask = dma_mask = DMA_40BIT_MASK;
	else
		persist_dma_mask = dma_mask = DMA_64BIT_MASK;

	/* Configure DMA attributes. */
	if (pci_set_dma_mask(pdev, dma_mask) == 0) {
		dev->features |= NETIF_F_HIGHDMA;
		rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
		if (rc) {
			dev_err(&pdev->dev,
				"pci_set_consistent_dma_mask failed, aborting.\n");
			goto err_out_unmap;
		}
	} else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
		dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
		goto err_out_unmap;
	}

	if (!(bp->flags & PCIE_FLAG))
		bnx2_get_pci_speed(bp);

	/* 5706A0 may falsely detect SERR and PERR. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		reg = REG_RD(bp, PCI_COMMAND);
		reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
		REG_WR(bp, PCI_COMMAND, reg);
	}
	else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
		!(bp->flags & PCIX_FLAG)) {
		dev_err(&pdev->dev,
			"5706 A1 can only be used in a PCIX bus, aborting.\n");
		goto err_out_unmap;
	}
	bnx2_init_nvram(bp);

	reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);

	if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
	    BNX2_SHM_HDR_SIGNATURE_SIG) {
		u32 off = PCI_FUNC(pdev->devfn) << 2;

		bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
	} else
		bp->shmem_base = HOST_VIEW_SHMEM_BASE;

	/* Get the permanent MAC address.  First we need to make sure the
	 * firmware is actually running.
	 */
	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);

	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
		dev_err(&pdev->dev, "Firmware not running, aborting.\n");
		rc = -ENODEV;
		goto err_out_unmap;
	}

	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
	for (i = 0, j = 0; i < 3; i++) {
		u8 num, k, skip0;

		num = (u8) (reg >> (24 - (i * 8)));
		for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
			if (num >= k || !skip0 || k == 1) {
				bp->fw_version[j++] = (num / k) + '0';
				skip0 = 0;
			}
		}
		if (i != 2)
			bp->fw_version[j++] = '.';
	}
	if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
	    BNX2_PORT_FEATURE_ASF_ENABLED) {
		bp->flags |= ASF_ENABLE_FLAG;

		for (i = 0; i < 30; i++) {
			reg = REG_RD_IND(bp, bp->shmem_base +
					     BNX2_BC_STATE_CONDITION);
			if (reg & BNX2_CONDITION_MFW_RUN_MASK)
				break;
			msleep(10);
		}
	}
	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_BC_STATE_CONDITION);
	reg &= BNX2_CONDITION_MFW_RUN_MASK;
	if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
	    reg != BNX2_CONDITION_MFW_RUN_NONE) {
		u32 addr = REG_RD_IND(bp, bp->shmem_base + BNX2_MFW_VER_PTR);

		bp->fw_version[j++] = ' ';
		for (i = 0; i < 3; i++) {
			reg = REG_RD_IND(bp, addr + i * 4);
			reg = swab32(reg);
			memcpy(&bp->fw_version[j], &reg, 4);
			j += 4;
		}
	}
	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
	bp->mac_addr[0] = (u8) (reg >> 8);
	bp->mac_addr[1] = (u8) reg;

	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
	bp->mac_addr[2] = (u8) (reg >> 24);
	bp->mac_addr[3] = (u8) (reg >> 16);
	bp->mac_addr[4] = (u8) (reg >> 8);
	bp->mac_addr[5] = (u8) reg;

	bp->tx_ring_size = MAX_TX_DESC_CNT;
	bnx2_set_rx_ring_size(bp, 255);

	bp->rx_csum = 1;

	bp->rx_offset = sizeof(struct l2_fhdr) + 2;

	bp->tx_quick_cons_trip_int = 20;
	bp->tx_quick_cons_trip = 20;
	bp->tx_ticks_int = 80;
	bp->tx_ticks = 80;

	bp->rx_quick_cons_trip_int = 6;
	bp->rx_quick_cons_trip = 6;
	bp->rx_ticks_int = 18;
	bp->rx_ticks = 18;

	bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;

	bp->timer_interval = HZ;
	bp->current_interval = HZ;

	bp->phy_addr = 1;
	/* Disable WOL support if we are running on a SERDES chip. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_get_5709_media(bp);
	else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
		bp->phy_flags |= PHY_SERDES_FLAG;

	bp->phy_port = PORT_TP;
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bp->phy_port = PORT_FIBRE;
		bp->flags |= NO_WOL_FLAG;
		if (CHIP_NUM(bp) != CHIP_NUM_5706) {
			bp->phy_addr = 2;
			reg = REG_RD_IND(bp, bp->shmem_base +
					 BNX2_SHARED_HW_CFG_CONFIG);
			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
				bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
		}
		bnx2_init_remote_phy(bp);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
		   CHIP_NUM(bp) == CHIP_NUM_5708)
		bp->phy_flags |= PHY_CRC_FIX_FLAG;
	else if (CHIP_ID(bp) == CHIP_ID_5709_A0 ||
		 CHIP_ID(bp) == CHIP_ID_5709_A1)
		bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1))
		bp->flags |= NO_WOL_FLAG;

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		bp->tx_quick_cons_trip_int =
			bp->tx_quick_cons_trip;
		bp->tx_ticks_int = bp->tx_ticks;
		bp->rx_quick_cons_trip_int =
			bp->rx_quick_cons_trip;
		bp->rx_ticks_int = bp->rx_ticks;
		bp->comp_prod_trip_int = bp->comp_prod_trip;
		bp->com_ticks_int = bp->com_ticks;
		bp->cmd_ticks_int = bp->cmd_ticks;
	}

	/* Disable MSI on 5706 if AMD 8132 bridge is found.
	 *
	 * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
	 * with byte enables disabled on the unused 32-bit word.  This is legal
	 * but causes problems on the AMD 8132 which will eventually stop
	 * responding after a while.
	 *
	 * AMD believes this incompatibility is unique to the 5706, and
	 * prefers to locally disable MSI rather than globally disabling it.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
		struct pci_dev *amd_8132 = NULL;

		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
						  PCI_DEVICE_ID_AMD_8132_BRIDGE,
						  amd_8132))) {

			if (amd_8132->revision >= 0x10 &&
			    amd_8132->revision <= 0x13) {
				disable_msi = 1;
				pci_dev_put(amd_8132);
				break;
			}
		}
	}

	bnx2_set_default_link(bp);
	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

	init_timer(&bp->timer);
	bp->timer.expires = RUN_AT(bp->timer_interval);
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2_timer;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}

err_out_release:
	pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
static char * __devinit
bnx2_bus_string(struct bnx2 *bp, char *str)
{
	char *s = str;

	if (bp->flags & PCIE_FLAG) {
		s += sprintf(s, "PCI Express");
	} else {
		s += sprintf(s, "PCI");
		if (bp->flags & PCIX_FLAG)
			s += sprintf(s, "-X");
		if (bp->flags & PCI_32BIT_FLAG)
			s += sprintf(s, " 32-bit");
		else
			s += sprintf(s, " 64-bit");
		s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
	}
	return str;
}
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc;
	char str[40];
	DECLARE_MAC_BUF(mac);

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev(sizeof(*bp));
	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	dev->open = bnx2_open;
	dev->hard_start_xmit = bnx2_start_xmit;
	dev->stop = bnx2_close;
	dev->get_stats = bnx2_get_stats;
	dev->set_multicast_list = bnx2_set_rx_mode;
	dev->do_ioctl = bnx2_ioctl;
	dev->set_mac_address = bnx2_change_mac_addr;
	dev->change_mtu = bnx2_change_mtu;
	dev->tx_timeout = bnx2_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef BCM_VLAN
	dev->vlan_rx_register = bnx2_vlan_rx_register;
#endif
	dev->ethtool_ops = &bnx2_ethtool_ops;

	bp = netdev_priv(dev);
	netif_napi_add(dev, &bp->napi, bnx2_poll, 64);

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2;
#endif

	pci_set_drvdata(pdev, dev);

	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);
	bp->name = board_info[ent->driver_data].name;

	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_IPV6_CSUM;

#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_TSO6;

	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		if (bp->regview)
			iounmap(bp->regview);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}

	printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
		"IRQ %d, node addr %s\n",
		dev->name,
		bp->name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		((CHIP_ID(bp) & 0x0ff0) >> 4),
		bnx2_bus_string(bp, str),
		dev->base_addr,
		bp->pdev->irq, print_mac(mac, dev->dev_addr));

	return 0;
}
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	pci_restore_state(pdev);
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp);
	bnx2_netif_start(bp);
	return 0;
}
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
};
static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}

static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}

module_init(bnx2_init);
module_exit(bnx2_cleanup);