/* bnx2.c: Broadcom NX2 network driver.
 *
 * Copyright (c) 2004-2008 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan (mchan@broadcom.com)
 */
#include <linux/module.h>
#include <linux/moduleparam.h>

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
#include <linux/if_vlan.h>
#endif
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/zlib.h>
#define FW_BUF_SIZE 0x10000

#define DRV_MODULE_NAME "bnx2"
#define PFX DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION "1.7.4"
#define DRV_MODULE_RELDATE "February 18, 2008"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT (5*HZ)
static char version[] __devinitdata =
    "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
/* indexed by board_t, above */
static struct {
    char *name;
} board_info[] __devinitdata = {
    { "Broadcom NetXtreme II BCM5706 1000Base-T" },
    { "HP NC370T Multifunction Gigabit Server Adapter" },
    { "HP NC370i Multifunction Gigabit Server Adapter" },
    { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
    { "HP NC370F Multifunction Gigabit Server Adapter" },
    { "Broadcom NetXtreme II BCM5708 1000Base-T" },
    { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
    { "Broadcom NetXtreme II BCM5709 1000Base-T" },
    { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
};
static struct pci_device_id bnx2_pci_tbl[] = {
    { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
      PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
    { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
      PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
    { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
      PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
    { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
      PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
    { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
      PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
    { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
      PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
    { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
      PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
    { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
      PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
    { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
      PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
    { 0, }
};
static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS    (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS (BNX2_NV_WREN)
    /* Slow EEPROM */
    {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
     BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
     SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
     "EEPROM - slow"},
    /* Expansion entry 0001 */
    {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
     NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
     SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
     "Entry 0001"},
    /* Saifun SA25F010 (non-buffered flash) */
    /* strap, cfg1, & write1 need updates */
    {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
     NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
     SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
     "Non-buffered flash (128kB)"},
    /* Saifun SA25F020 (non-buffered flash) */
    /* strap, cfg1, & write1 need updates */
    {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
     NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
     SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
     "Non-buffered flash (256kB)"},
    /* Expansion entry 0100 */
    {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
     NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
     SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
     "Entry 0100"},
    /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
    {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
     NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
     ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
     "Entry 0101: ST M45PE10 (128kB non-buffered)"},
    /* Entry 0110: ST M45PE20 (non-buffered flash) */
    {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
     NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
     ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
     "Entry 0110: ST M45PE20 (256kB non-buffered)"},
    /* Saifun SA25F005 (non-buffered flash) */
    /* strap, cfg1, & write1 need updates */
    {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
     NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
     SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
     "Non-buffered flash (64kB)"},
    /* Fast EEPROM */
    {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
     BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
     SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
     "EEPROM - fast"},
    /* Expansion entry 1001 */
    {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
     NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
     SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
     "Entry 1001"},
    /* Expansion entry 1010 */
    {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
     NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
     SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
     "Entry 1010"},
    /* ATMEL AT45DB011B (buffered flash) */
    {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
     BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
     BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
     "Buffered flash (128kB)"},
    /* Expansion entry 1100 */
    {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
     NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
     SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
     "Entry 1100"},
    /* Expansion entry 1101 */
    {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
     NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
     SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
     "Entry 1101"},
    /* Atmel Expansion entry 1110 */
    {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
     BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
     BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
     "Entry 1110 (Atmel)"},
    /* ATMEL AT45DB021B (buffered flash) */
    {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
     BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
     BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
     "Buffered flash (256kB)"},
};
static struct flash_spec flash_5709 = {
    .flags      = BNX2_NV_BUFFERED,
    .page_bits  = BCM5709_FLASH_PAGE_BITS,
    .page_size  = BCM5709_FLASH_PAGE_SIZE,
    .addr_mask  = BCM5709_FLASH_BYTE_ADDR_MASK,
    .total_size = BUFFERED_FLASH_TOTAL_SIZE*2,
    .name       = "5709 Buffered flash (256kB)",
};
MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
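
/* Returns the number of free tx descriptors.  tx_prod and tx_cons are
 * free-running 16-bit indices, so the raw difference below is masked back
 * to 16 bits to handle wrap-around: e.g. tx_prod = 0x0003 and
 * tx_cons = 0xfffe yield diff = 5 outstanding BDs.
 */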
static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
    u32 diff;

    smp_mb();

    /* The ring uses 256 indices for 255 entries, one of them
     * needs to be skipped.
     */
    diff = bp->tx_prod - bnapi->tx_cons;
    if (unlikely(diff >= TX_DESC_CNT)) {
        diff &= 0xffff;
        if (diff == TX_DESC_CNT)
            diff = MAX_TX_DESC_CNT;
    }
    return (bp->tx_ring_size - diff);
}
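
/* Indirect register access: registers that are not directly mapped are
 * reached by writing the target offset to BNX2_PCICFG_REG_WINDOW_ADDRESS
 * and then accessing BNX2_PCICFG_REG_WINDOW.  indirect_lock serializes the
 * two-step address/data sequence against concurrent callers.
 */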
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
    u32 val;

    spin_lock_bh(&bp->indirect_lock);
    REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
    val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
    spin_unlock_bh(&bp->indirect_lock);
    return val;
}

static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
    spin_lock_bh(&bp->indirect_lock);
    REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
    REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
    spin_unlock_bh(&bp->indirect_lock);
}

static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
    bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}

static u32
bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
{
    return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
}
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
    offset += cid_addr;
    spin_lock_bh(&bp->indirect_lock);
    if (CHIP_NUM(bp) == CHIP_NUM_5709) {
        int i;

        REG_WR(bp, BNX2_CTX_CTX_DATA, val);
        REG_WR(bp, BNX2_CTX_CTX_CTRL,
               offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
        for (i = 0; i < 5; i++) {
            val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
            if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
                break;
            udelay(5);
        }
    } else {
        REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
        REG_WR(bp, BNX2_CTX_DATA, val);
    }
    spin_unlock_bh(&bp->indirect_lock);
}
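
/* MDIO access to the PHY.  When the EMAC is auto-polling the PHY for link
 * status, auto-poll is turned off around the manual transaction and
 * restored afterwards.  A transaction is started by writing the PHY
 * address, register number and command to BNX2_EMAC_MDIO_COMM with
 * START_BUSY set, then polling until the hardware clears START_BUSY.
 */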
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
    u32 val1;
    int i, ret;

    if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
        val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
        val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

        REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
        REG_RD(bp, BNX2_EMAC_MDIO_MODE);

        udelay(40);
    }

    val1 = (bp->phy_addr << 21) | (reg << 16) |
        BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
        BNX2_EMAC_MDIO_COMM_START_BUSY;
    REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

    for (i = 0; i < 50; i++) {
        udelay(10);

        val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
        if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
            udelay(5);

            val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
            val1 &= BNX2_EMAC_MDIO_COMM_DATA;

            break;
        }
    }

    if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
        *val = 0x0;
        ret = -EBUSY;
    }
    else {
        *val = val1;
        ret = 0;
    }

    if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
        val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
        val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

        REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
        REG_RD(bp, BNX2_EMAC_MDIO_MODE);

        udelay(40);
    }

    return ret;
}
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
    u32 val1;
    int i, ret;

    if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
        val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
        val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

        REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
        REG_RD(bp, BNX2_EMAC_MDIO_MODE);

        udelay(40);
    }

    val1 = (bp->phy_addr << 21) | (reg << 16) | val |
        BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
        BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
    REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

    for (i = 0; i < 50; i++) {
        udelay(10);

        val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
        if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
            udelay(5);
            break;
        }
    }

    if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
        ret = -EBUSY;
    else
        ret = 0;

    if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
        val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
        val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

        REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
        REG_RD(bp, BNX2_EMAC_MDIO_MODE);

        udelay(40);
    }

    return ret;
}
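
/* Per-vector interrupt masking is done through BNX2_PCICFG_INT_ACK_CMD:
 * each bnx2_napi carries its vector number in int_num and acks the last
 * seen status block index back through last_status_idx.
 */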
static void
bnx2_disable_int(struct bnx2 *bp)
{
    int i;
    struct bnx2_napi *bnapi;

    for (i = 0; i < bp->irq_nvecs; i++) {
        bnapi = &bp->bnx2_napi[i];
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
               BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
    }
    REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
static void
bnx2_enable_int(struct bnx2 *bp)
{
    int i;
    struct bnx2_napi *bnapi;

    for (i = 0; i < bp->irq_nvecs; i++) {
        bnapi = &bp->bnx2_napi[i];

        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
               BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
               bnapi->last_status_idx);

        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
               bnapi->last_status_idx);
    }
    REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
    int i;

    atomic_inc(&bp->intr_sem);
    bnx2_disable_int(bp);
    for (i = 0; i < bp->irq_nvecs; i++)
        synchronize_irq(bp->irq_tbl[i].vector);
}

static void
bnx2_napi_disable(struct bnx2 *bp)
{
    int i;

    for (i = 0; i < bp->irq_nvecs; i++)
        napi_disable(&bp->bnx2_napi[i].napi);
}

static void
bnx2_napi_enable(struct bnx2 *bp)
{
    int i;

    for (i = 0; i < bp->irq_nvecs; i++)
        napi_enable(&bp->bnx2_napi[i].napi);
}
static void
bnx2_netif_stop(struct bnx2 *bp)
{
    bnx2_disable_int_sync(bp);
    if (netif_running(bp->dev)) {
        bnx2_napi_disable(bp);
        netif_tx_disable(bp->dev);
        bp->dev->trans_start = jiffies; /* prevent tx timeout */
    }
}

static void
bnx2_netif_start(struct bnx2 *bp)
{
    if (atomic_dec_and_test(&bp->intr_sem)) {
        if (netif_running(bp->dev)) {
            netif_wake_queue(bp->dev);
            bnx2_napi_enable(bp);
            bnx2_enable_int(bp);
        }
    }
}
static void
bnx2_free_mem(struct bnx2 *bp)
{
    int i;

    for (i = 0; i < bp->ctx_pages; i++) {
        if (bp->ctx_blk[i]) {
            pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
                                bp->ctx_blk[i],
                                bp->ctx_blk_mapping[i]);
            bp->ctx_blk[i] = NULL;
        }
    }
    if (bp->status_blk) {
        pci_free_consistent(bp->pdev, bp->status_stats_size,
                            bp->status_blk, bp->status_blk_mapping);
        bp->status_blk = NULL;
        bp->stats_blk = NULL;
    }
    if (bp->tx_desc_ring) {
        pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
                            bp->tx_desc_ring, bp->tx_desc_mapping);
        bp->tx_desc_ring = NULL;
    }
    kfree(bp->tx_buf_ring);
    bp->tx_buf_ring = NULL;
    for (i = 0; i < bp->rx_max_ring; i++) {
        if (bp->rx_desc_ring[i])
            pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
                                bp->rx_desc_ring[i],
                                bp->rx_desc_mapping[i]);
        bp->rx_desc_ring[i] = NULL;
    }
    vfree(bp->rx_buf_ring);
    bp->rx_buf_ring = NULL;
    for (i = 0; i < bp->rx_max_pg_ring; i++) {
        if (bp->rx_pg_desc_ring[i])
            pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
                                bp->rx_pg_desc_ring[i],
                                bp->rx_pg_desc_mapping[i]);
        bp->rx_pg_desc_ring[i] = NULL;
    }
    vfree(bp->rx_pg_ring);
    bp->rx_pg_ring = NULL;
}
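
/* Allocates the tx/rx rings plus the status and statistics blocks.  The
 * status block(s) and statistics block share one coherent DMA allocation:
 * an L1-cache-aligned status area of status_blk_size bytes followed by
 * the statistics block, so stats_blk points status_blk_size bytes past
 * status_blk.
 */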
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
    int i, status_blk_size;

    bp->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
    if (bp->tx_buf_ring == NULL)
        return -ENOMEM;

    bp->tx_desc_ring = pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
                                            &bp->tx_desc_mapping);
    if (bp->tx_desc_ring == NULL)
        goto alloc_mem_err;

    bp->rx_buf_ring = vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
    if (bp->rx_buf_ring == NULL)
        goto alloc_mem_err;

    memset(bp->rx_buf_ring, 0, SW_RXBD_RING_SIZE * bp->rx_max_ring);

    for (i = 0; i < bp->rx_max_ring; i++) {
        bp->rx_desc_ring[i] =
            pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
                                 &bp->rx_desc_mapping[i]);
        if (bp->rx_desc_ring[i] == NULL)
            goto alloc_mem_err;
    }

    if (bp->rx_pg_ring_size) {
        bp->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
                                 bp->rx_max_pg_ring);
        if (bp->rx_pg_ring == NULL)
            goto alloc_mem_err;

        memset(bp->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
               bp->rx_max_pg_ring);
    }

    for (i = 0; i < bp->rx_max_pg_ring; i++) {
        bp->rx_pg_desc_ring[i] =
            pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
                                 &bp->rx_pg_desc_mapping[i]);
        if (bp->rx_pg_desc_ring[i] == NULL)
            goto alloc_mem_err;
    }

    /* Combine status and statistics blocks into one allocation. */
    status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
    if (bp->flags & BNX2_FLAG_MSIX_CAP)
        status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
                                         BNX2_SBLK_MSIX_ALIGN_SIZE);
    bp->status_stats_size = status_blk_size +
        sizeof(struct statistics_block);

    bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
                                          &bp->status_blk_mapping);
    if (bp->status_blk == NULL)
        goto alloc_mem_err;

    memset(bp->status_blk, 0, bp->status_stats_size);

    bp->bnx2_napi[0].status_blk = bp->status_blk;
    if (bp->flags & BNX2_FLAG_MSIX_CAP) {
        for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
            struct bnx2_napi *bnapi = &bp->bnx2_napi[i];

            bnapi->status_blk_msix = (void *)
                ((unsigned long) bp->status_blk +
                 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
            bnapi->int_num = i << 24;
        }
    }

    bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
                              status_blk_size);

    bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

    if (CHIP_NUM(bp) == CHIP_NUM_5709) {
        bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
        if (bp->ctx_pages == 0)
            bp->ctx_pages = 1;
        for (i = 0; i < bp->ctx_pages; i++) {
            bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
                                                  BCM_PAGE_SIZE,
                                                  &bp->ctx_blk_mapping[i]);
            if (bp->ctx_blk[i] == NULL)
                goto alloc_mem_err;
        }
    }
    return 0;

alloc_mem_err:
    bnx2_free_mem(bp);
    return -ENOMEM;
}
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
    u32 fw_link_status = 0;

    if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
        return;

    if (bp->link_up) {
        u32 bmsr;

        switch (bp->line_speed) {
            case SPEED_10:
                if (bp->duplex == DUPLEX_HALF)
                    fw_link_status = BNX2_LINK_STATUS_10HALF;
                else
                    fw_link_status = BNX2_LINK_STATUS_10FULL;
                break;
            case SPEED_100:
                if (bp->duplex == DUPLEX_HALF)
                    fw_link_status = BNX2_LINK_STATUS_100HALF;
                else
                    fw_link_status = BNX2_LINK_STATUS_100FULL;
                break;
            case SPEED_1000:
                if (bp->duplex == DUPLEX_HALF)
                    fw_link_status = BNX2_LINK_STATUS_1000HALF;
                else
                    fw_link_status = BNX2_LINK_STATUS_1000FULL;
                break;
            case SPEED_2500:
                if (bp->duplex == DUPLEX_HALF)
                    fw_link_status = BNX2_LINK_STATUS_2500HALF;
                else
                    fw_link_status = BNX2_LINK_STATUS_2500FULL;
                break;
        }

        fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

        if (bp->autoneg) {
            fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

            bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
            bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

            if (!(bmsr & BMSR_ANEGCOMPLETE) ||
                bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
                fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
            else
                fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
        }
    }
    else
        fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

    bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}
static char *
bnx2_xceiver_str(struct bnx2 *bp)
{
    return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
            ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
             "Copper"));
}
static void
bnx2_report_link(struct bnx2 *bp)
{
    if (bp->link_up) {
        netif_carrier_on(bp->dev);
        printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
               bnx2_xceiver_str(bp));

        printk("%d Mbps ", bp->line_speed);

        if (bp->duplex == DUPLEX_FULL)
            printk("full duplex");
        else
            printk("half duplex");

        if (bp->flow_ctrl) {
            if (bp->flow_ctrl & FLOW_CTRL_RX) {
                printk(", receive ");
                if (bp->flow_ctrl & FLOW_CTRL_TX)
                    printk("& transmit ");
            }
            else {
                printk(", transmit ");
            }
            printk("flow control ON");
        }
        printk("\n");
    }
    else {
        netif_carrier_off(bp->dev);
        printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
               bnx2_xceiver_str(bp));
    }

    bnx2_report_fw_link(bp);
}
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
    u32 local_adv, remote_adv;

    bp->flow_ctrl = 0;
    if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
        (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

        if (bp->duplex == DUPLEX_FULL) {
            bp->flow_ctrl = bp->req_flow_ctrl;
        }
        return;
    }

    if (bp->duplex != DUPLEX_FULL) {
        return;
    }

    if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
        (CHIP_NUM(bp) == CHIP_NUM_5708)) {
        u32 val;

        bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
        if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
            bp->flow_ctrl |= FLOW_CTRL_TX;
        if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
            bp->flow_ctrl |= FLOW_CTRL_RX;
        return;
    }

    bnx2_read_phy(bp, bp->mii_adv, &local_adv);
    bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

    if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
        u32 new_local_adv = 0;
        u32 new_remote_adv = 0;

        if (local_adv & ADVERTISE_1000XPAUSE)
            new_local_adv |= ADVERTISE_PAUSE_CAP;
        if (local_adv & ADVERTISE_1000XPSE_ASYM)
            new_local_adv |= ADVERTISE_PAUSE_ASYM;
        if (remote_adv & ADVERTISE_1000XPAUSE)
            new_remote_adv |= ADVERTISE_PAUSE_CAP;
        if (remote_adv & ADVERTISE_1000XPSE_ASYM)
            new_remote_adv |= ADVERTISE_PAUSE_ASYM;

        local_adv = new_local_adv;
        remote_adv = new_remote_adv;
    }

    /* See Table 28B-3 of 802.3ab-1999 spec. */
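    /* Resolution of our {Pause, Asym} against the partner's {Pause, Asym}:
     *
     *   local Pause,        remote Pause        -> TX and RX pause
     *   local Pause + Asym, remote Asym only    -> RX pause only
     *   local Asym only,    remote Pause + Asym -> TX pause only
     *   any other combination                   -> no pause
     */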
    if (local_adv & ADVERTISE_PAUSE_CAP) {
        if (local_adv & ADVERTISE_PAUSE_ASYM) {
            if (remote_adv & ADVERTISE_PAUSE_CAP) {
                bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
            }
            else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
                bp->flow_ctrl = FLOW_CTRL_RX;
            }
        }
        else {
            if (remote_adv & ADVERTISE_PAUSE_CAP) {
                bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
            }
        }
    }
    else if (local_adv & ADVERTISE_PAUSE_ASYM) {
        if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
            (remote_adv & ADVERTISE_PAUSE_ASYM)) {
            bp->flow_ctrl = FLOW_CTRL_TX;
        }
    }
}
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
    u32 val, speed;

    bp->link_up = 1;

    bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
    bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
    bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

    if ((bp->autoneg & AUTONEG_SPEED) == 0) {
        bp->line_speed = bp->req_line_speed;
        bp->duplex = bp->req_duplex;
        return 0;
    }

    speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
    switch (speed) {
        case MII_BNX2_GP_TOP_AN_SPEED_10:
            bp->line_speed = SPEED_10;
            break;
        case MII_BNX2_GP_TOP_AN_SPEED_100:
            bp->line_speed = SPEED_100;
            break;
        case MII_BNX2_GP_TOP_AN_SPEED_1G:
        case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
            bp->line_speed = SPEED_1000;
            break;
        case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
            bp->line_speed = SPEED_2500;
            break;
    }
    if (val & MII_BNX2_GP_TOP_AN_FD)
        bp->duplex = DUPLEX_FULL;
    else
        bp->duplex = DUPLEX_HALF;

    return 0;
}
static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
    u32 val;

    bp->link_up = 1;
    bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
    switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
        case BCM5708S_1000X_STAT1_SPEED_10:
            bp->line_speed = SPEED_10;
            break;
        case BCM5708S_1000X_STAT1_SPEED_100:
            bp->line_speed = SPEED_100;
            break;
        case BCM5708S_1000X_STAT1_SPEED_1G:
            bp->line_speed = SPEED_1000;
            break;
        case BCM5708S_1000X_STAT1_SPEED_2G5:
            bp->line_speed = SPEED_2500;
            break;
    }
    if (val & BCM5708S_1000X_STAT1_FD)
        bp->duplex = DUPLEX_FULL;
    else
        bp->duplex = DUPLEX_HALF;

    return 0;
}
static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
    u32 bmcr, local_adv, remote_adv, common;

    bp->link_up = 1;
    bp->line_speed = SPEED_1000;

    bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
    if (bmcr & BMCR_FULLDPLX) {
        bp->duplex = DUPLEX_FULL;
    }
    else {
        bp->duplex = DUPLEX_HALF;
    }

    if (!(bmcr & BMCR_ANENABLE)) {
        return 0;
    }

    bnx2_read_phy(bp, bp->mii_adv, &local_adv);
    bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

    common = local_adv & remote_adv;
    if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

        if (common & ADVERTISE_1000XFULL) {
            bp->duplex = DUPLEX_FULL;
        }
        else {
            bp->duplex = DUPLEX_HALF;
        }
    }

    return 0;
}
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
    u32 bmcr;

    bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
    if (bmcr & BMCR_ANENABLE) {
        u32 local_adv, remote_adv, common;

        bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
        bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

        common = local_adv & (remote_adv >> 2);
        if (common & ADVERTISE_1000FULL) {
            bp->line_speed = SPEED_1000;
            bp->duplex = DUPLEX_FULL;
        }
        else if (common & ADVERTISE_1000HALF) {
            bp->line_speed = SPEED_1000;
            bp->duplex = DUPLEX_HALF;
        }
        else {
            bnx2_read_phy(bp, bp->mii_adv, &local_adv);
            bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

            common = local_adv & remote_adv;
            if (common & ADVERTISE_100FULL) {
                bp->line_speed = SPEED_100;
                bp->duplex = DUPLEX_FULL;
            }
            else if (common & ADVERTISE_100HALF) {
                bp->line_speed = SPEED_100;
                bp->duplex = DUPLEX_HALF;
            }
            else if (common & ADVERTISE_10FULL) {
                bp->line_speed = SPEED_10;
                bp->duplex = DUPLEX_FULL;
            }
            else if (common & ADVERTISE_10HALF) {
                bp->line_speed = SPEED_10;
                bp->duplex = DUPLEX_HALF;
            }
            else {
                bp->line_speed = 0;
                bp->link_up = 0;
            }
        }
    }
    else {
        if (bmcr & BMCR_SPEED100) {
            bp->line_speed = SPEED_100;
        }
        else {
            bp->line_speed = SPEED_10;
        }
        if (bmcr & BMCR_FULLDPLX) {
            bp->duplex = DUPLEX_FULL;
        }
        else {
            bp->duplex = DUPLEX_HALF;
        }
    }

    return 0;
}
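
/* Programs the base RX chain context.  On the 5709, low/high water marks
 * for pause-frame generation are derived from the RX ring size, scaled by
 * the L2CTX water-mark scale factors, and packed into the context type
 * word alongside the BD chain type.
 */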
static void
bnx2_init_rx_context0(struct bnx2 *bp)
{
    u32 val, rx_cid_addr = GET_CID_ADDR(RX_CID);

    val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
    val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
    val |= 0x02 << 8;

    if (CHIP_NUM(bp) == CHIP_NUM_5709) {
        u32 lo_water, hi_water;

        if (bp->flow_ctrl & FLOW_CTRL_TX)
            lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
        else
            lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
        if (lo_water >= bp->rx_ring_size)
            lo_water = 0;

        hi_water = bp->rx_ring_size / 4;

        if (hi_water <= lo_water)
            lo_water = 0;

        hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
        lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;

        if (hi_water > 0xf)
            hi_water = 0xf;
        else if (hi_water == 0)
            lo_water = 0;
        val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
    }
    bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
    u32 val;

    REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
    if (bp->link_up && (bp->line_speed == SPEED_1000) &&
        (bp->duplex == DUPLEX_HALF)) {
        REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
    }

    /* Configure the EMAC mode register. */
    val = REG_RD(bp, BNX2_EMAC_MODE);

    val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
             BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
             BNX2_EMAC_MODE_25G_MODE);

    if (bp->link_up) {
        switch (bp->line_speed) {
            case SPEED_10:
                if (CHIP_NUM(bp) != CHIP_NUM_5706) {
                    val |= BNX2_EMAC_MODE_PORT_MII_10M;
                    break;
                }
                /* fall through */
            case SPEED_100:
                val |= BNX2_EMAC_MODE_PORT_MII;
                break;
            case SPEED_2500:
                val |= BNX2_EMAC_MODE_25G_MODE;
                /* fall through */
            case SPEED_1000:
                val |= BNX2_EMAC_MODE_PORT_GMII;
                break;
        }
    }
    else {
        val |= BNX2_EMAC_MODE_PORT_GMII;
    }

    /* Set the MAC to operate in the appropriate duplex mode. */
    if (bp->duplex == DUPLEX_HALF)
        val |= BNX2_EMAC_MODE_HALF_DUPLEX;
    REG_WR(bp, BNX2_EMAC_MODE, val);

    /* Enable/disable rx PAUSE. */
    bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

    if (bp->flow_ctrl & FLOW_CTRL_RX)
        bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
    REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

    /* Enable/disable tx PAUSE. */
    val = REG_RD(bp, BNX2_EMAC_TX_MODE);
    val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

    if (bp->flow_ctrl & FLOW_CTRL_TX)
        val |= BNX2_EMAC_TX_MODE_FLOW_EN;
    REG_WR(bp, BNX2_EMAC_TX_MODE, val);

    /* Acknowledge the interrupt. */
    REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

    if (CHIP_NUM(bp) == CHIP_NUM_5709)
        bnx2_init_rx_context0(bp);
}
static void
bnx2_enable_bmsr1(struct bnx2 *bp)
{
    if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
        (CHIP_NUM(bp) == CHIP_NUM_5709))
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                       MII_BNX2_BLK_ADDR_GP_STATUS);
}

static void
bnx2_disable_bmsr1(struct bnx2 *bp)
{
    if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
        (CHIP_NUM(bp) == CHIP_NUM_5709))
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
}
static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
    u32 up1;
    int ret = 1;

    if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
        return 0;

    if (bp->autoneg & AUTONEG_SPEED)
        bp->advertising |= ADVERTISED_2500baseX_Full;

    if (CHIP_NUM(bp) == CHIP_NUM_5709)
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

    bnx2_read_phy(bp, bp->mii_up1, &up1);
    if (!(up1 & BCM5708S_UP1_2G5)) {
        up1 |= BCM5708S_UP1_2G5;
        bnx2_write_phy(bp, bp->mii_up1, up1);
        ret = 0;
    }

    if (CHIP_NUM(bp) == CHIP_NUM_5709)
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

    return ret;
}

static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
    u32 up1;
    int ret = 0;

    if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
        return 0;

    if (CHIP_NUM(bp) == CHIP_NUM_5709)
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

    bnx2_read_phy(bp, bp->mii_up1, &up1);
    if (up1 & BCM5708S_UP1_2G5) {
        up1 &= ~BCM5708S_UP1_2G5;
        bnx2_write_phy(bp, bp->mii_up1, up1);
        ret = 1;
    }

    if (CHIP_NUM(bp) == CHIP_NUM_5709)
        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

    return ret;
}
static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
    u32 bmcr;

    if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
        return;

    if (CHIP_NUM(bp) == CHIP_NUM_5709) {
        u32 val;

        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                       MII_BNX2_BLK_ADDR_SERDES_DIG);
        bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
        val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
        val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
        bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

    } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
        bmcr |= BCM5708S_BMCR_FORCE_2500;
    }

    if (bp->autoneg & AUTONEG_SPEED) {
        bmcr &= ~BMCR_ANENABLE;
        if (bp->req_duplex == DUPLEX_FULL)
            bmcr |= BMCR_FULLDPLX;
    }
    bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
    u32 bmcr;

    if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
        return;

    if (CHIP_NUM(bp) == CHIP_NUM_5709) {
        u32 val;

        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                       MII_BNX2_BLK_ADDR_SERDES_DIG);
        bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
        val &= ~MII_BNX2_SD_MISC1_FORCE;
        bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

        bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
                       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

    } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
        bmcr &= ~BCM5708S_BMCR_FORCE_2500;
    }

    if (bp->autoneg & AUTONEG_SPEED)
        bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
    bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
static void
bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
{
    u32 val;

    bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
    bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
    if (start)
        bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
    else
        bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
}
static int
bnx2_set_link(struct bnx2 *bp)
{
    u32 bmsr;
    u8 link_up;

    if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
        bp->link_up = 1;
        return 0;
    }

    if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
        return 0;

    link_up = bp->link_up;

    bnx2_enable_bmsr1(bp);
    bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
    bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
    bnx2_disable_bmsr1(bp);

    if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
        (CHIP_NUM(bp) == CHIP_NUM_5706)) {
        u32 val, an_dbg;

        if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
            bnx2_5706s_force_link_dn(bp, 0);
            bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
        }
        val = REG_RD(bp, BNX2_EMAC_STATUS);

        bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
        bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
        bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

        if ((val & BNX2_EMAC_STATUS_LINK) &&
            !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
            bmsr |= BMSR_LSTATUS;
        else
            bmsr &= ~BMSR_LSTATUS;
    }

    if (bmsr & BMSR_LSTATUS) {
        bp->link_up = 1;

        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
            if (CHIP_NUM(bp) == CHIP_NUM_5706)
                bnx2_5706s_linkup(bp);
            else if (CHIP_NUM(bp) == CHIP_NUM_5708)
                bnx2_5708s_linkup(bp);
            else if (CHIP_NUM(bp) == CHIP_NUM_5709)
                bnx2_5709s_linkup(bp);
        }
        else {
            bnx2_copper_linkup(bp);
        }
        bnx2_resolve_flow_ctrl(bp);
    }
    else {
        if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
            (bp->autoneg & AUTONEG_SPEED))
            bnx2_disable_forced_2g5(bp);

        if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
            u32 bmcr;

            bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
            bmcr |= BMCR_ANENABLE;
            bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

            bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
        }
        bp->link_up = 0;
    }

    if (bp->link_up != link_up) {
        bnx2_report_link(bp);
    }

    bnx2_set_mac_link(bp);

    return 0;
}
static int
bnx2_reset_phy(struct bnx2 *bp)
{
    int i;
    u32 reg;

    bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
    for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
        udelay(10);

        bnx2_read_phy(bp, bp->mii_bmcr, &reg);
        if (!(reg & BMCR_RESET)) {
            udelay(20);
            break;
        }
    }
    if (i == PHY_RESET_MAX_WAIT) {
        return -EBUSY;
    }
    return 0;
}
static u32
bnx2_phy_get_pause_adv(struct bnx2 *bp)
{
    u32 adv = 0;

    if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
        (FLOW_CTRL_RX | FLOW_CTRL_TX)) {

        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
            adv = ADVERTISE_1000XPAUSE;
        }
        else {
            adv = ADVERTISE_PAUSE_CAP;
        }
    }
    else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
            adv = ADVERTISE_1000XPSE_ASYM;
        }
        else {
            adv = ADVERTISE_PAUSE_ASYM;
        }
    }
    else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
            adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
        }
        else {
            adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
        }
    }
    return adv;
}
static int bnx2_fw_sync(struct bnx2 *, u32, int);
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
{
    u32 speed_arg = 0, pause_adv;

    pause_adv = bnx2_phy_get_pause_adv(bp);

    if (bp->autoneg & AUTONEG_SPEED) {
        speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
        if (bp->advertising & ADVERTISED_10baseT_Half)
            speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
        if (bp->advertising & ADVERTISED_10baseT_Full)
            speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
        if (bp->advertising & ADVERTISED_100baseT_Half)
            speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
        if (bp->advertising & ADVERTISED_100baseT_Full)
            speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
        if (bp->advertising & ADVERTISED_1000baseT_Full)
            speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
        if (bp->advertising & ADVERTISED_2500baseX_Full)
            speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
    } else {
        if (bp->req_line_speed == SPEED_2500)
            speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
        else if (bp->req_line_speed == SPEED_1000)
            speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
        else if (bp->req_line_speed == SPEED_100) {
            if (bp->req_duplex == DUPLEX_FULL)
                speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
            else
                speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
        } else if (bp->req_line_speed == SPEED_10) {
            if (bp->req_duplex == DUPLEX_FULL)
                speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
            else
                speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
        }
    }

    if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
        speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
    if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
        speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

    if (port == PORT_TP)
        speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
                     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

    bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

    spin_unlock_bh(&bp->phy_lock);
    bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
    spin_lock_bh(&bp->phy_lock);

    return 0;
}
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
{
    u32 adv, bmcr;
    u32 new_adv = 0;

    if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
        return (bnx2_setup_remote_phy(bp, port));

    if (!(bp->autoneg & AUTONEG_SPEED)) {
        u32 new_bmcr;
        int force_link_down = 0;

        if (bp->req_line_speed == SPEED_2500) {
            if (!bnx2_test_and_enable_2g5(bp))
                force_link_down = 1;
        } else if (bp->req_line_speed == SPEED_1000) {
            if (bnx2_test_and_disable_2g5(bp))
                force_link_down = 1;
        }
        bnx2_read_phy(bp, bp->mii_adv, &adv);
        adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
        new_bmcr = bmcr & ~BMCR_ANENABLE;
        new_bmcr |= BMCR_SPEED1000;

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
            if (bp->req_line_speed == SPEED_2500)
                bnx2_enable_forced_2g5(bp);
            else if (bp->req_line_speed == SPEED_1000) {
                bnx2_disable_forced_2g5(bp);
                new_bmcr &= ~0x2000;
            }

        } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
            if (bp->req_line_speed == SPEED_2500)
                new_bmcr |= BCM5708S_BMCR_FORCE_2500;
            else
                new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
        }

        if (bp->req_duplex == DUPLEX_FULL) {
            adv |= ADVERTISE_1000XFULL;
            new_bmcr |= BMCR_FULLDPLX;
        }
        else {
            adv |= ADVERTISE_1000XHALF;
            new_bmcr &= ~BMCR_FULLDPLX;
        }
        if ((new_bmcr != bmcr) || (force_link_down)) {
            /* Force a link down visible on the other side */
            if (bp->link_up) {
                bnx2_write_phy(bp, bp->mii_adv, adv &
                               ~(ADVERTISE_1000XFULL |
                                 ADVERTISE_1000XHALF));
                bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
                               BMCR_ANRESTART | BMCR_ANENABLE);

                bp->link_up = 0;
                netif_carrier_off(bp->dev);
                bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
                bnx2_report_link(bp);
            }
            bnx2_write_phy(bp, bp->mii_adv, adv);
            bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
        } else {
            bnx2_resolve_flow_ctrl(bp);
            bnx2_set_mac_link(bp);
        }
        return 0;
    }

    bnx2_test_and_enable_2g5(bp);

    if (bp->advertising & ADVERTISED_1000baseT_Full)
        new_adv |= ADVERTISE_1000XFULL;

    new_adv |= bnx2_phy_get_pause_adv(bp);

    bnx2_read_phy(bp, bp->mii_adv, &adv);
    bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

    bp->serdes_an_pending = 0;
    if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
        /* Force a link down visible on the other side */
        if (bp->link_up) {
            bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
            spin_unlock_bh(&bp->phy_lock);
            msleep(20);
            spin_lock_bh(&bp->phy_lock);
        }

        bnx2_write_phy(bp, bp->mii_adv, new_adv);
        bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
                       BMCR_ANENABLE);
        /* Speed up link-up time when the link partner
         * does not autonegotiate, which is very common
         * in blade servers.  Some blade servers use
         * IPMI for keyboard input and it's important
         * to minimize link disruptions.  Autoneg. involves
         * exchanging base pages plus 3 next pages and
         * normally completes in about 120 msec.
         */
        bp->current_interval = SERDES_AN_TIMEOUT;
        bp->serdes_an_pending = 1;
        mod_timer(&bp->timer, jiffies + bp->current_interval);
    } else {
        bnx2_resolve_flow_ctrl(bp);
        bnx2_set_mac_link(bp);
    }

    return 0;
}
#define ETHTOOL_ALL_FIBRE_SPEED \
    (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ? \
        (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
        (ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED \
    (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
    ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
    ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
    ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
    u32 link;

    if (bp->phy_port == PORT_TP)
        link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
    else
        link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);

    if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
        bp->req_line_speed = 0;
        bp->autoneg |= AUTONEG_SPEED;
        bp->advertising = ADVERTISED_Autoneg;
        if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
            bp->advertising |= ADVERTISED_10baseT_Half;
        if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
            bp->advertising |= ADVERTISED_10baseT_Full;
        if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
            bp->advertising |= ADVERTISED_100baseT_Half;
        if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
            bp->advertising |= ADVERTISED_100baseT_Full;
        if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
            bp->advertising |= ADVERTISED_1000baseT_Full;
        if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
            bp->advertising |= ADVERTISED_2500baseX_Full;
    } else {
        bp->autoneg = 0;
        bp->advertising = 0;
        bp->req_duplex = DUPLEX_FULL;
        if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
            bp->req_line_speed = SPEED_10;
            if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
                bp->req_duplex = DUPLEX_HALF;
        }
        if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
            bp->req_line_speed = SPEED_100;
            if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
                bp->req_duplex = DUPLEX_HALF;
        }
        if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
            bp->req_line_speed = SPEED_1000;
        if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
            bp->req_line_speed = SPEED_2500;
    }
}
static void
bnx2_set_default_link(struct bnx2 *bp)
{
    if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
        bnx2_set_default_remote_link(bp);
        return;
    }

    bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
    bp->req_line_speed = 0;
    if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
        u32 reg;

        bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

        reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
        reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
        if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
            bp->autoneg = 0;
            bp->req_line_speed = bp->line_speed = SPEED_1000;
            bp->req_duplex = DUPLEX_FULL;
        }
    } else
        bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
}
static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
    u32 msg;
    u32 addr;

    spin_lock(&bp->indirect_lock);
    msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
    addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
    REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
    REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
    spin_unlock(&bp->indirect_lock);
}
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
    u32 msg;
    u8 link_up = bp->link_up;
    u8 old_port;

    msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

    if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
        bnx2_send_heart_beat(bp);

    msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

    if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
        bp->link_up = 0;
    else {
        u32 speed;

        bp->link_up = 1;
        speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
        bp->duplex = DUPLEX_FULL;
        switch (speed) {
            case BNX2_LINK_STATUS_10HALF:
                bp->duplex = DUPLEX_HALF;
                /* fall through */
            case BNX2_LINK_STATUS_10FULL:
                bp->line_speed = SPEED_10;
                break;
            case BNX2_LINK_STATUS_100HALF:
                bp->duplex = DUPLEX_HALF;
                /* fall through */
            case BNX2_LINK_STATUS_100BASE_T4:
            case BNX2_LINK_STATUS_100FULL:
                bp->line_speed = SPEED_100;
                break;
            case BNX2_LINK_STATUS_1000HALF:
                bp->duplex = DUPLEX_HALF;
                /* fall through */
            case BNX2_LINK_STATUS_1000FULL:
                bp->line_speed = SPEED_1000;
                break;
            case BNX2_LINK_STATUS_2500HALF:
                bp->duplex = DUPLEX_HALF;
                /* fall through */
            case BNX2_LINK_STATUS_2500FULL:
                bp->line_speed = SPEED_2500;
                break;
            default:
                bp->line_speed = 0;
                break;
        }

        bp->flow_ctrl = 0;
        if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
            (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
            if (bp->duplex == DUPLEX_FULL)
                bp->flow_ctrl = bp->req_flow_ctrl;
        } else {
            if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
                bp->flow_ctrl |= FLOW_CTRL_TX;
            if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
                bp->flow_ctrl |= FLOW_CTRL_RX;
        }

        old_port = bp->phy_port;
        if (msg & BNX2_LINK_STATUS_SERDES_LINK)
            bp->phy_port = PORT_FIBRE;
        else
            bp->phy_port = PORT_TP;

        if (old_port != bp->phy_port)
            bnx2_set_default_link(bp);
    }

    if (bp->link_up != link_up)
        bnx2_report_link(bp);

    bnx2_set_mac_link(bp);
}
static void
bnx2_set_remote_link(struct bnx2 *bp)
{
    u32 evt_code;

    evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
    switch (evt_code) {
        case BNX2_FW_EVT_CODE_LINK_EVENT:
            bnx2_remote_phy_event(bp);
            break;
        case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
        default:
            bnx2_send_heart_beat(bp);
            break;
    }
}
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
    u32 bmcr;
    u32 new_bmcr;

    bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

    if (bp->autoneg & AUTONEG_SPEED) {
        u32 adv_reg, adv1000_reg;
        u32 new_adv_reg = 0;
        u32 new_adv1000_reg = 0;

        bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
        adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
                    ADVERTISE_PAUSE_ASYM);

        bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
        adv1000_reg &= PHY_ALL_1000_SPEED;

        if (bp->advertising & ADVERTISED_10baseT_Half)
            new_adv_reg |= ADVERTISE_10HALF;
        if (bp->advertising & ADVERTISED_10baseT_Full)
            new_adv_reg |= ADVERTISE_10FULL;
        if (bp->advertising & ADVERTISED_100baseT_Half)
            new_adv_reg |= ADVERTISE_100HALF;
        if (bp->advertising & ADVERTISED_100baseT_Full)
            new_adv_reg |= ADVERTISE_100FULL;
        if (bp->advertising & ADVERTISED_1000baseT_Full)
            new_adv1000_reg |= ADVERTISE_1000FULL;

        new_adv_reg |= ADVERTISE_CSMA;

        new_adv_reg |= bnx2_phy_get_pause_adv(bp);

        if ((adv1000_reg != new_adv1000_reg) ||
            (adv_reg != new_adv_reg) ||
            ((bmcr & BMCR_ANENABLE) == 0)) {

            bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
            bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
            bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
                           BMCR_ANENABLE);
        }
        else if (bp->link_up) {
            /* Flow ctrl may have changed from auto to forced */
            /* or vice-versa. */

            bnx2_resolve_flow_ctrl(bp);
            bnx2_set_mac_link(bp);
        }
        return 0;
    }

    new_bmcr = 0;
    if (bp->req_line_speed == SPEED_100) {
        new_bmcr |= BMCR_SPEED100;
    }
    if (bp->req_duplex == DUPLEX_FULL) {
        new_bmcr |= BMCR_FULLDPLX;
    }
    if (new_bmcr != bmcr) {
        u32 bmsr;

        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

        if (bmsr & BMSR_LSTATUS) {
            /* Force link down */
            bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
            spin_unlock_bh(&bp->phy_lock);
            msleep(50);
            spin_lock_bh(&bp->phy_lock);

            bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
            bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
        }

        bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

        /* Normally, the new speed is setup after the link has
         * gone down and up again. In some cases, link will not go
         * down so we need to set up the new speed here.
         */
        if (bmsr & BMSR_LSTATUS) {
            bp->line_speed = bp->req_line_speed;
            bp->duplex = bp->req_duplex;
            bnx2_resolve_flow_ctrl(bp);
            bnx2_set_mac_link(bp);
        }
    } else {
        bnx2_resolve_flow_ctrl(bp);
        bnx2_set_mac_link(bp);
    }
    return 0;
}
static int
bnx2_setup_phy(struct bnx2 *bp, u8 port)
{
    if (bp->loopback == MAC_LOOPBACK)
        return 0;

    if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
        return (bnx2_setup_serdes_phy(bp, port));
    }
    else {
        return (bnx2_setup_copper_phy(bp));
    }
}
static int
bnx2_init_5709s_phy(struct bnx2 *bp)
{
    u32 val;

    bp->mii_bmcr = MII_BMCR + 0x10;
    bp->mii_bmsr = MII_BMSR + 0x10;
    bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
    bp->mii_adv = MII_ADVERTISE + 0x10;
    bp->mii_lpa = MII_LPA + 0x10;
    bp->mii_up1 = MII_BNX2_OVER1G_UP1;

    bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
    bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

    bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
    bnx2_reset_phy(bp);

    bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

    bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
    val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
    val |= MII_BNX2_SD_1000XCTL1_FIBER;
    bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

    bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
    bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
    if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
        val |= BCM5708S_UP1_2G5;
    else
        val &= ~BCM5708S_UP1_2G5;
    bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

    bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
    bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
    val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
    bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

    bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

    val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
          MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
    bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

    bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

    return 0;
}
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
    u32 val;

    bnx2_reset_phy(bp);

    bp->mii_up1 = BCM5708S_UP1;

    bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
    bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
    bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

    bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
    val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
    bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

    bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
    val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
    bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

    if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
        bnx2_read_phy(bp, BCM5708S_UP1, &val);
        val |= BCM5708S_UP1_2G5;
        bnx2_write_phy(bp, BCM5708S_UP1, val);
    }

    if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
        (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
        (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
        /* increase tx signal amplitude */
        bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                       BCM5708S_BLK_ADDR_TX_MISC);
        bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
        val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
        bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
        bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
    }

    val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
          BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

    if (val) {
        u32 is_backplane;

        is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
        if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
            bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                           BCM5708S_BLK_ADDR_TX_MISC);
            bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
            bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                           BCM5708S_BLK_ADDR_DIG);
        }
    }
    return 0;
}
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
    bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

    bnx2_reset_phy(bp);

    if (CHIP_NUM(bp) == CHIP_NUM_5706)
        REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

    if (bp->dev->mtu > 1500) {
        u32 val;

        /* Set extended packet length bit */
        bnx2_write_phy(bp, 0x18, 0x7);
        bnx2_read_phy(bp, 0x18, &val);
        bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

        bnx2_write_phy(bp, 0x1c, 0x6c00);
        bnx2_read_phy(bp, 0x1c, &val);
        bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
    }
    else {
        u32 val;

        bnx2_write_phy(bp, 0x18, 0x7);
        bnx2_read_phy(bp, 0x18, &val);
        bnx2_write_phy(bp, 0x18, val & ~0x4007);

        bnx2_write_phy(bp, 0x1c, 0x6c00);
        bnx2_read_phy(bp, 0x1c, &val);
        bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
    }

    return 0;
}
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
    u32 val;

    bnx2_reset_phy(bp);

    if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
        bnx2_write_phy(bp, 0x18, 0x0c00);
        bnx2_write_phy(bp, 0x17, 0x000a);
        bnx2_write_phy(bp, 0x15, 0x310b);
        bnx2_write_phy(bp, 0x17, 0x201f);
        bnx2_write_phy(bp, 0x15, 0x9506);
        bnx2_write_phy(bp, 0x17, 0x401f);
        bnx2_write_phy(bp, 0x15, 0x14e2);
        bnx2_write_phy(bp, 0x18, 0x0400);
    }

    if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
        bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
                       MII_BNX2_DSP_EXPAND_REG | 0x8);
        bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
        val &= ~(1 << 8);
        bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
    }

    if (bp->dev->mtu > 1500) {
        /* Set extended packet length bit */
        bnx2_write_phy(bp, 0x18, 0x7);
        bnx2_read_phy(bp, 0x18, &val);
        bnx2_write_phy(bp, 0x18, val | 0x4000);

        bnx2_read_phy(bp, 0x10, &val);
        bnx2_write_phy(bp, 0x10, val | 0x1);
    }
    else {
        bnx2_write_phy(bp, 0x18, 0x7);
        bnx2_read_phy(bp, 0x18, &val);
        bnx2_write_phy(bp, 0x18, val & ~0x4007);

        bnx2_read_phy(bp, 0x10, &val);
        bnx2_write_phy(bp, 0x10, val & ~0x1);
    }

    /* ethernet@wirespeed */
    bnx2_write_phy(bp, 0x18, 0x7007);
    bnx2_read_phy(bp, 0x18, &val);
    bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
    return 0;
}
static int
bnx2_init_phy(struct bnx2 *bp)
{
    u32 val;
    int rc = 0;

    bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
    bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;

    bp->mii_bmcr = MII_BMCR;
    bp->mii_bmsr = MII_BMSR;
    bp->mii_bmsr1 = MII_BMSR;
    bp->mii_adv = MII_ADVERTISE;
    bp->mii_lpa = MII_LPA;

    REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

    if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
        goto setup_phy;

    bnx2_read_phy(bp, MII_PHYSID1, &val);
    bp->phy_id = val << 16;
    bnx2_read_phy(bp, MII_PHYSID2, &val);
    bp->phy_id |= val & 0xffff;

    if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
        if (CHIP_NUM(bp) == CHIP_NUM_5706)
            rc = bnx2_init_5706s_phy(bp);
        else if (CHIP_NUM(bp) == CHIP_NUM_5708)
            rc = bnx2_init_5708s_phy(bp);
        else if (CHIP_NUM(bp) == CHIP_NUM_5709)
            rc = bnx2_init_5709s_phy(bp);
    }
    else {
        rc = bnx2_init_copper_phy(bp);
    }

setup_phy:
    if (!rc)
        rc = bnx2_setup_phy(bp, bp->phy_port);

    return rc;
}
static int
bnx2_set_mac_loopback(struct bnx2 *bp)
{
    u32 mac_mode;

    mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
    mac_mode &= ~BNX2_EMAC_MODE_PORT;
    mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
    REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
    bp->link_up = 1;
    return 0;
}

static int bnx2_test_link(struct bnx2 *);
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
    u32 mac_mode;
    int rc, i;

    spin_lock_bh(&bp->phy_lock);
    rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
                        BMCR_SPEED1000);
    spin_unlock_bh(&bp->phy_lock);
    if (rc)
        return rc;

    for (i = 0; i < 10; i++) {
        if (bnx2_test_link(bp) == 0)
            break;
        msleep(100);
    }

    mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
    mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
                  BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
                  BNX2_EMAC_MODE_25G_MODE);

    mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
    REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
    bp->link_up = 1;
    return 0;
}
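
/* Driver/firmware handshake through shared memory: each request written
 * to BNX2_DRV_MB carries an incrementing sequence number that the
 * bootcode echoes back in BNX2_FW_MB.  Typical usage, as in
 * bnx2_setup_remote_phy() above:
 *
 *    bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
 *
 * For non-WAIT0 requests, a zero return means the firmware acked the
 * sequence in time and reported OK status.
 */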
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
    int i;
    u32 val;

    bp->fw_wr_seq++;
    msg_data |= bp->fw_wr_seq;

    bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

    /* wait for an acknowledgement. */
    for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
        msleep(10);

        val = bnx2_shmem_rd(bp, BNX2_FW_MB);

        if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
            break;
    }
    if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
        return 0;

    /* If we timed out, inform the firmware that this is the case. */
    if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
        if (!silent)
            printk(KERN_ERR PFX "fw sync timeout, reset code = "
                                "%x\n", msg_data);

        msg_data &= ~BNX2_DRV_MSG_CODE;
        msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

        bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

        return -EBUSY;
    }

    if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
        return -EIO;

    return 0;
}
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
    int i, ret = 0;
    u32 val;

    val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
    val |= (BCM_PAGE_BITS - 8) << 16;
    REG_WR(bp, BNX2_CTX_COMMAND, val);
    for (i = 0; i < 10; i++) {
        val = REG_RD(bp, BNX2_CTX_COMMAND);
        if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
            break;
        udelay(2);
    }
    if (val & BNX2_CTX_COMMAND_MEM_INIT)
        return -EBUSY;

    for (i = 0; i < bp->ctx_pages; i++) {
        int j;

        memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);

        REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
               (bp->ctx_blk_mapping[i] & 0xffffffff) |
               BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
        REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
               (u64) bp->ctx_blk_mapping[i] >> 32);
        REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
               BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
        for (j = 0; j < 10; j++) {
            val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
            if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
                break;
            udelay(5);
        }
        if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
            ret = -EBUSY;
            break;
        }
    }
    return ret;
}
static void
bnx2_init_context(struct bnx2 *bp)
{
    u32 vcid;

    vcid = 96;
    while (vcid) {
        u32 vcid_addr, pcid_addr, offset;
        int i;

        vcid--;

        if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
            u32 new_vcid;

            vcid_addr = GET_PCID_ADDR(vcid);
            if (vcid & 0x8) {
                new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
            }
            else {
                new_vcid = vcid;
            }
            pcid_addr = GET_PCID_ADDR(new_vcid);
        }
        else {
            vcid_addr = GET_CID_ADDR(vcid);
            pcid_addr = vcid_addr;
        }

        for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
            vcid_addr += (i << PHY_CTX_SHIFT);
            pcid_addr += (i << PHY_CTX_SHIFT);

            REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
            REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

            /* Zero out the context. */
            for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
                bnx2_ctx_wr(bp, vcid_addr, offset, 0);
        }
    }
}
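
/* Works around bad on-chip RX buffer memory: the firmware flags unusable
 * mbuf clusters by setting bit 9 of the allocated value.  The routine
 * below drains the free pool, remembers the good clusters, and frees only
 * those back, leaving the bad ones permanently allocated.
 */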
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
    u16 *good_mbuf;
    u32 good_mbuf_cnt;
    u32 val;

    good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
    if (good_mbuf == NULL) {
        printk(KERN_ERR PFX "Failed to allocate memory in "
                            "bnx2_alloc_bad_rbuf\n");
        return -ENOMEM;
    }

    REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
           BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

    good_mbuf_cnt = 0;

    /* Allocate a bunch of mbufs and save the good ones in an array. */
    val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
    while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
        bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
                        BNX2_RBUF_COMMAND_ALLOC_REQ);

        val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);

        val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

        /* The addresses with Bit 9 set are bad memory blocks. */
        if (!(val & (1 << 9))) {
            good_mbuf[good_mbuf_cnt] = (u16) val;
            good_mbuf_cnt++;
        }

        val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
    }

    /* Free the good ones back to the mbuf pool thus discarding
     * all the bad ones. */
    while (good_mbuf_cnt) {
        good_mbuf_cnt--;

        val = good_mbuf[good_mbuf_cnt];
        /* Encode the index in both halves of the free command word. */
        val = (val << 9) | val | 1;

        bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
    }
    kfree(good_mbuf);
    return 0;
}
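
/* The unicast MAC address is split across two match registers:
 * BNX2_EMAC_MAC_MATCH0 holds bytes 0-1 in its low 16 bits and
 * BNX2_EMAC_MAC_MATCH1 holds bytes 2-5.  For example (hypothetical
 * address), 00:10:18:2e:5f:01 gives MATCH0 = 0x00000010 and
 * MATCH1 = 0x182e5f01.
 */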
static void
bnx2_set_mac_addr(struct bnx2 *bp)
{
    u32 val;
    u8 *mac_addr = bp->dev->dev_addr;

    val = (mac_addr[0] << 8) | mac_addr[1];

    REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);

    val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
        (mac_addr[4] << 8) | mac_addr[5];

    REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
}
static int
bnx2_alloc_rx_page(struct bnx2 *bp, u16 index)
{
    dma_addr_t mapping;
    struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
    struct rx_bd *rxbd =
        &bp->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
    struct page *page = alloc_page(GFP_ATOMIC);

    if (!page)
        return -ENOMEM;
    mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
                           PCI_DMA_FROMDEVICE);
    rx_pg->page = page;
    pci_unmap_addr_set(rx_pg, mapping, mapping);
    rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
    rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
    return 0;
}

static void
bnx2_free_rx_page(struct bnx2 *bp, u16 index)
{
    struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
    struct page *page = rx_pg->page;

    if (!page)
        return;

    pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
                   PCI_DMA_FROMDEVICE);

    __free_page(page);
    rx_pg->page = NULL;
}
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, u16 index)
{
    struct sk_buff *skb;
    struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
    dma_addr_t mapping;
    struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
    unsigned long align;

    skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
    if (skb == NULL) {
        return -ENOMEM;
    }

    if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
        skb_reserve(skb, BNX2_RX_ALIGN - align);

    mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
                             PCI_DMA_FROMDEVICE);

    rx_buf->skb = skb;
    pci_unmap_addr_set(rx_buf, mapping, mapping);

    rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
    rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

    bnapi->rx_prod_bseq += bp->rx_buf_use_size;

    return 0;
}
2431 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2433 struct status_block *sblk = bnapi->status_blk;
2434 u32 new_link_state, old_link_state;
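/* An attention bit and its ack copy disagree exactly when an
 * unserviced event is pending; the SET/CLEAR writes below bring the
 * ack bit back in line with the event bit.
 */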
2437 new_link_state = sblk->status_attn_bits & event;
2438 old_link_state = sblk->status_attn_bits_ack & event;
2439 if (new_link_state != old_link_state) {
2441 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2443 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2451 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2453 spin_lock(&bp->phy_lock);
2455 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2457 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2458 bnx2_set_remote_link(bp);
2460 spin_unlock(&bp->phy_lock);
2465 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2469 if (bnapi->int_num == 0)
2470 cons = bnapi->status_blk->status_tx_quick_consumer_index0;
2472 cons = bnapi->status_blk_msix->status_tx_quick_consumer_index;
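/* A consumer index sitting on the last entry of a ring page points at
 * the chain BD, which never carries a packet, so step past it.
 */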
2474 if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2475 cons++;
2476 return cons;
2480 bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2482 u16 hw_cons, sw_cons, sw_ring_cons;
2485 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2486 sw_cons = bnapi->tx_cons;
2488 while (sw_cons != hw_cons) {
2489 struct sw_bd *tx_buf;
2490 struct sk_buff *skb;
2493 sw_ring_cons = TX_RING_IDX(sw_cons);
2495 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
2498 /* partial BD completions possible with TSO packets */
2499 if (skb_is_gso(skb)) {
2500 u16 last_idx, last_ring_idx;
2502 last_idx = sw_cons +
2503 skb_shinfo(skb)->nr_frags + 1;
2504 last_ring_idx = sw_ring_cons +
2505 skb_shinfo(skb)->nr_frags + 1;
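/* last_idx must also cover the chain BD when the packet wraps past
 * the end of a ring page; the signed 16-bit compare then tolerates
 * index wraparound when testing for full completion.
 */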
2506 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2509 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2514 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
2515 skb_headlen(skb), PCI_DMA_TODEVICE);
2518 last = skb_shinfo(skb)->nr_frags;
2520 for (i = 0; i < last; i++) {
2521 sw_cons = NEXT_TX_BD(sw_cons);
2523 pci_unmap_page(bp->pdev,
2525 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
2527 skb_shinfo(skb)->frags[i].size,
2531 sw_cons = NEXT_TX_BD(sw_cons);
2535 if (tx_pkt == budget)
2538 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2541 bnapi->hw_tx_cons = hw_cons;
2542 bnapi->tx_cons = sw_cons;
2543 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2544 * before checking for netif_queue_stopped(). Without the
2545 * memory barrier, there is a small possibility that bnx2_start_xmit()
2546 * will miss it and cause the queue to be stopped forever.
2547 */
2548 smp_mb();
2550 if (unlikely(netif_queue_stopped(bp->dev)) &&
2551 (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh)) {
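/* Re-check under netif_tx_lock to close the race with a concurrent
 * bnx2_start_xmit() that may be stopping the queue right now.
 */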
2552 netif_tx_lock(bp->dev);
2553 if ((netif_queue_stopped(bp->dev)) &&
2554 (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh))
2555 netif_wake_queue(bp->dev);
2556 netif_tx_unlock(bp->dev);
2562 bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_napi *bnapi,
2563 struct sk_buff *skb, int count)
2565 struct sw_pg *cons_rx_pg, *prod_rx_pg;
2566 struct rx_bd *cons_bd, *prod_bd;
2569 u16 hw_prod = bnapi->rx_pg_prod, prod;
2570 u16 cons = bnapi->rx_pg_cons;
2572 for (i = 0; i < count; i++) {
2573 prod = RX_PG_RING_IDX(hw_prod);
2575 prod_rx_pg = &bp->rx_pg_ring[prod];
2576 cons_rx_pg = &bp->rx_pg_ring[cons];
2577 cons_bd = &bp->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2578 prod_bd = &bp->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2580 if (i == 0 && skb) {
2582 struct skb_shared_info *shinfo;
2584 shinfo = skb_shinfo(skb);
2586 page = shinfo->frags[shinfo->nr_frags].page;
2587 shinfo->frags[shinfo->nr_frags].page = NULL;
2588 mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2589 PCI_DMA_FROMDEVICE);
2590 cons_rx_pg->page = page;
2591 pci_unmap_addr_set(cons_rx_pg, mapping, mapping);
2595 prod_rx_pg->page = cons_rx_pg->page;
2596 cons_rx_pg->page = NULL;
2597 pci_unmap_addr_set(prod_rx_pg, mapping,
2598 pci_unmap_addr(cons_rx_pg, mapping));
2600 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2601 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2604 cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
2605 hw_prod = NEXT_RX_BD(hw_prod);
2607 bnapi->rx_pg_prod = hw_prod;
2608 bnapi->rx_pg_cons = cons;
2612 bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
2615 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2616 struct rx_bd *cons_bd, *prod_bd;
2618 cons_rx_buf = &bp->rx_buf_ring[cons];
2619 prod_rx_buf = &bp->rx_buf_ring[prod];
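/* The CPU only inspected the first rx_offset + RX_COPY_THRESH bytes
 * (see the matching sync_for_cpu in bnx2_rx_int), so only that range
 * needs handing back to the device here.
 */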
2621 pci_dma_sync_single_for_device(bp->pdev,
2622 pci_unmap_addr(cons_rx_buf, mapping),
2623 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2625 bnapi->rx_prod_bseq += bp->rx_buf_use_size;
2627 prod_rx_buf->skb = skb;
2632 pci_unmap_addr_set(prod_rx_buf, mapping,
2633 pci_unmap_addr(cons_rx_buf, mapping));
2635 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2636 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2637 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2638 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2642 bnx2_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
2643 unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
2647 u16 prod = ring_idx & 0xffff;
2649 err = bnx2_alloc_rx_skb(bp, bnapi, prod);
2650 if (unlikely(err)) {
2651 bnx2_reuse_rx_skb(bp, bnapi, skb, (u16) (ring_idx >> 16), prod);
2653 unsigned int raw_len = len + 4; /* + 4 for the frame CRC */
2654 int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
2656 bnx2_reuse_rx_skb_pages(bp, bnapi, NULL, pages);
2661 skb_reserve(skb, bp->rx_offset);
2662 pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
2663 PCI_DMA_FROMDEVICE);
2669 unsigned int i, frag_len, frag_size, pages;
2670 struct sw_pg *rx_pg;
2671 u16 pg_cons = bnapi->rx_pg_cons;
2672 u16 pg_prod = bnapi->rx_pg_prod;
2674 frag_size = len + 4 - hdr_len;
2675 pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
2676 skb_put(skb, hdr_len);
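/* The first hdr_len bytes stay in the linear skb; the remainder of
 * the frame is attached below as page fragments, one ring page at a
 * time.
 */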
2678 for (i = 0; i < pages; i++) {
2679 frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
2680 if (unlikely(frag_len <= 4)) {
2681 unsigned int tail = 4 - frag_len;
2683 bnapi->rx_pg_cons = pg_cons;
2684 bnapi->rx_pg_prod = pg_prod;
2685 bnx2_reuse_rx_skb_pages(bp, bnapi, NULL,
2692 &skb_shinfo(skb)->frags[i - 1];
2694 skb->data_len -= tail;
2695 skb->truesize -= tail;
2699 rx_pg = &bp->rx_pg_ring[pg_cons];
2701 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping),
2702 PAGE_SIZE, PCI_DMA_FROMDEVICE);
2707 skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
2710 err = bnx2_alloc_rx_page(bp, RX_PG_RING_IDX(pg_prod));
2711 if (unlikely(err)) {
2712 bnapi->rx_pg_cons = pg_cons;
2713 bnapi->rx_pg_prod = pg_prod;
2714 bnx2_reuse_rx_skb_pages(bp, bnapi, skb,
2719 frag_size -= frag_len;
2720 skb->data_len += frag_len;
2721 skb->truesize += frag_len;
2722 skb->len += frag_len;
2724 pg_prod = NEXT_RX_BD(pg_prod);
2725 pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
2727 bnapi->rx_pg_prod = pg_prod;
2728 bnapi->rx_pg_cons = pg_cons;
2734 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
2736 u16 cons = bnapi->status_blk->status_rx_quick_consumer_index0;
2738 if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
2739 cons++;
2740 return cons;
2744 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2746 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2747 struct l2_fhdr *rx_hdr;
2748 int rx_pkt = 0, pg_ring_used = 0;
2750 hw_cons = bnx2_get_hw_rx_cons(bnapi);
2751 sw_cons = bnapi->rx_cons;
2752 sw_prod = bnapi->rx_prod;
2754 /* Memory barrier necessary as speculative reads of the rx
2755 * buffer can be ahead of the index in the status block
2756 */
2757 rmb();
2758 while (sw_cons != hw_cons) {
2759 unsigned int len, hdr_len;
2761 struct sw_bd *rx_buf;
2762 struct sk_buff *skb;
2763 dma_addr_t dma_addr;
2765 sw_ring_cons = RX_RING_IDX(sw_cons);
2766 sw_ring_prod = RX_RING_IDX(sw_prod);
2768 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
2773 dma_addr = pci_unmap_addr(rx_buf, mapping);
2775 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
2776 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2778 rx_hdr = (struct l2_fhdr *) skb->data;
2779 len = rx_hdr->l2_fhdr_pkt_len;
2781 if ((status = rx_hdr->l2_fhdr_status) &
2782 (L2_FHDR_ERRORS_BAD_CRC |
2783 L2_FHDR_ERRORS_PHY_DECODE |
2784 L2_FHDR_ERRORS_ALIGNMENT |
2785 L2_FHDR_ERRORS_TOO_SHORT |
2786 L2_FHDR_ERRORS_GIANT_FRAME)) {
2788 bnx2_reuse_rx_skb(bp, bnapi, skb, sw_ring_cons,
2793 if (status & L2_FHDR_STATUS_SPLIT) {
2794 hdr_len = rx_hdr->l2_fhdr_ip_xsum;
2796 } else if (len > bp->rx_jumbo_thresh) {
2797 hdr_len = bp->rx_jumbo_thresh;
2803 if (len <= bp->rx_copy_thresh) {
2804 struct sk_buff *new_skb;
2806 new_skb = netdev_alloc_skb(bp->dev, len + 2);
2807 if (new_skb == NULL) {
2808 bnx2_reuse_rx_skb(bp, bnapi, skb, sw_ring_cons,
2814 skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
2815 new_skb->data, len + 2);
2816 skb_reserve(new_skb, 2);
2817 skb_put(new_skb, len);
2819 bnx2_reuse_rx_skb(bp, bnapi, skb,
2820 sw_ring_cons, sw_ring_prod);
2823 } else if (unlikely(bnx2_rx_skb(bp, bnapi, skb, len, hdr_len,
2824 dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
2827 skb->protocol = eth_type_trans(skb, bp->dev);
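/* An oversize frame is only let through when it carries a VLAN tag
 * (ethertype 0x8100), which accounts for the extra 4 bytes.
 */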
2829 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
2830 (ntohs(skb->protocol) != 0x8100)) {
2837 skb->ip_summed = CHECKSUM_NONE;
2839 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
2840 L2_FHDR_STATUS_UDP_DATAGRAM))) {
2842 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
2843 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
2844 skb->ip_summed = CHECKSUM_UNNECESSARY;
2848 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && bp->vlgrp) {
2849 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
2850 rx_hdr->l2_fhdr_vlan_tag);
2854 netif_receive_skb(skb);
2856 bp->dev->last_rx = jiffies;
2860 sw_cons = NEXT_RX_BD(sw_cons);
2861 sw_prod = NEXT_RX_BD(sw_prod);
2863 if (rx_pkt == budget)
2866 /* Refresh hw_cons to see if there is new work */
2867 if (sw_cons == hw_cons) {
2868 hw_cons = bnx2_get_hw_rx_cons(bnapi);
2872 bnapi->rx_cons = sw_cons;
2873 bnapi->rx_prod = sw_prod;
2876 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
2879 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
2881 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bnapi->rx_prod_bseq);
2889 /* MSI ISR - The only difference between this and the INTx ISR
2890 * is that the MSI interrupt is always serviced.
2891 */
2893 bnx2_msi(int irq, void *dev_instance)
2895 struct net_device *dev = dev_instance;
2896 struct bnx2 *bp = netdev_priv(dev);
2897 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
2899 prefetch(bnapi->status_blk);
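/* Acknowledge and mask the interrupt; the NAPI poll routine unmasks
 * it again when polling completes.
 */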
2900 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2901 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2902 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2904 /* Return here if interrupt is disabled. */
2905 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2908 netif_rx_schedule(dev, &bnapi->napi);
2914 bnx2_msi_1shot(int irq, void *dev_instance)
2916 struct net_device *dev = dev_instance;
2917 struct bnx2 *bp = netdev_priv(dev);
2918 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
2920 prefetch(bnapi->status_blk);
2922 /* Return here if interrupt is disabled. */
2923 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2926 netif_rx_schedule(dev, &bnapi->napi);
2932 bnx2_interrupt(int irq, void *dev_instance)
2934 struct net_device *dev = dev_instance;
2935 struct bnx2 *bp = netdev_priv(dev);
2936 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
2937 struct status_block *sblk = bnapi->status_blk;
2939 /* When using INTx, it is possible for the interrupt to arrive
2940 * at the CPU before the status block posted prior to the
2941 * interrupt. Reading a register will flush the status block.
2942 * When using MSI, the MSI message will always complete after
2943 * the status block write.
2944 */
2945 if ((sblk->status_idx == bnapi->last_status_idx) &&
2946 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
2947 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
2950 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2951 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2952 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2954 /* Read back to deassert IRQ immediately to avoid too many
2955 * spurious interrupts.
2956 */
2957 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
2959 /* Return here if interrupt is shared and is disabled. */
2960 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2963 if (netif_rx_schedule_prep(dev, &bnapi->napi)) {
2964 bnapi->last_status_idx = sblk->status_idx;
2965 __netif_rx_schedule(dev, &bnapi->napi);
2972 bnx2_tx_msix(int irq, void *dev_instance)
2974 struct net_device *dev = dev_instance;
2975 struct bnx2 *bp = netdev_priv(dev);
2976 struct bnx2_napi *bnapi = &bp->bnx2_napi[BNX2_TX_VEC];
2978 prefetch(bnapi->status_blk_msix);
2980 /* Return here if interrupt is disabled. */
2981 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2984 netif_rx_schedule(dev, &bnapi->napi);
2988 #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
2989 STATUS_ATTN_BITS_TIMER_ABORT)
2992 bnx2_has_work(struct bnx2_napi *bnapi)
2994 struct status_block *sblk = bnapi->status_blk;
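/* Work is pending if either hardware consumer index has moved past
 * ours, or if any attention bit differs from its acked copy.
 */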
2996 if ((bnx2_get_hw_rx_cons(bnapi) != bnapi->rx_cons) ||
2997 (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons))
3000 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3001 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3007 static int bnx2_tx_poll(struct napi_struct *napi, int budget)
3009 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3010 struct bnx2 *bp = bnapi->bp;
3012 struct status_block_msix *sblk = bnapi->status_blk_msix;
3015 work_done += bnx2_tx_int(bp, bnapi, budget - work_done);
3016 if (unlikely(work_done >= budget))
3019 bnapi->last_status_idx = sblk->status_idx;
3021 } while (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons);
3023 netif_rx_complete(bp->dev, napi);
3024 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3025 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3026 bnapi->last_status_idx);
3030 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3031 int work_done, int budget)
3033 struct status_block *sblk = bnapi->status_blk;
3034 u32 status_attn_bits = sblk->status_attn_bits;
3035 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3037 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3038 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3040 bnx2_phy_int(bp, bnapi);
3042 /* This is needed to take care of transient status
3043 * during link changes.
3044 */
3045 REG_WR(bp, BNX2_HC_COMMAND,
3046 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3047 REG_RD(bp, BNX2_HC_COMMAND);
3050 if (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons)
3051 bnx2_tx_int(bp, bnapi, 0);
3053 if (bnx2_get_hw_rx_cons(bnapi) != bnapi->rx_cons)
3054 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3059 static int bnx2_poll(struct napi_struct *napi, int budget)
3061 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3062 struct bnx2 *bp = bnapi->bp;
3064 struct status_block *sblk = bnapi->status_blk;
3067 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3069 if (unlikely(work_done >= budget))
3072 /* bnapi->last_status_idx is used below to tell the hw how
3073 * much work has been processed, so we must read it before
3074 * checking for more work.
3075 */
3076 bnapi->last_status_idx = sblk->status_idx;
3078 if (likely(!bnx2_has_work(bnapi))) {
3079 netif_rx_complete(bp->dev, napi);
3080 if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3081 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3082 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3083 bnapi->last_status_idx);
3086 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3087 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3088 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3089 bnapi->last_status_idx);
3091 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3092 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3093 bnapi->last_status_idx);
3101 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3102 * from set_multicast.
3103 */
3105 bnx2_set_rx_mode(struct net_device *dev)
3107 struct bnx2 *bp = netdev_priv(dev);
3108 u32 rx_mode, sort_mode;
3111 spin_lock_bh(&bp->phy_lock);
3113 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3114 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3115 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3117 if (!bp->vlgrp && !(bp->flags & BNX2_FLAG_ASF_ENABLE))
3118 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3120 if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
3121 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3123 if (dev->flags & IFF_PROMISC) {
3124 /* Promiscuous mode. */
3125 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3126 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3127 BNX2_RPM_SORT_USER0_PROM_VLAN;
3129 else if (dev->flags & IFF_ALLMULTI) {
3130 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3131 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3134 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3137 /* Accept one or more multicast addresses. */
3138 struct dev_mc_list *mclist;
3139 u32 mc_filter[NUM_MC_HASH_REGISTERS];
3144 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
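/* Hash each address into one of NUM_MC_HASH_REGISTERS * 32 bits: the
 * top 3 bits of the low CRC byte select the register, the low 5 bits
 * select the bit within it.
 */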
3146 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
3147 i++, mclist = mclist->next) {
3149 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
3150 bit = crc & 0xff;
3151 regidx = (bit & 0xe0) >> 5;
3152 bit &= 0x1f;
3153 mc_filter[regidx] |= (1 << bit);
3156 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3157 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3161 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3164 if (rx_mode != bp->rx_mode) {
3165 bp->rx_mode = rx_mode;
3166 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3169 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3170 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3171 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3173 spin_unlock_bh(&bp->phy_lock);
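/* Each RV2P instruction is 64 bits: it is staged through the
 * INSTR_HIGH/INSTR_LOW register pair and committed to instruction RAM
 * at index i/8 by the ADDR_CMD write.
 */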
3177 load_rv2p_fw(struct bnx2 *bp, __le32 *rv2p_code, u32 rv2p_code_len,
3184 for (i = 0; i < rv2p_code_len; i += 8) {
3185 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, le32_to_cpu(*rv2p_code));
3187 REG_WR(bp, BNX2_RV2P_INSTR_LOW, le32_to_cpu(*rv2p_code));
3190 if (rv2p_proc == RV2P_PROC1) {
3191 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3192 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
3195 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3196 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
3200 /* Reset the processor; un-stalling is done later. */
3201 if (rv2p_proc == RV2P_PROC1) {
3202 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3205 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3210 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
3217 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3218 val |= cpu_reg->mode_value_halt;
3219 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3220 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3222 /* Load the Text area. */
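/* Firmware sections are linked at MIPS view addresses; rebasing them
 * against mips_view_base into spad_base yields an offset reachable
 * through the indirect register interface.
 */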
3223 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
3227 rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
3232 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
3233 bnx2_reg_wr_ind(bp, offset, le32_to_cpu(fw->text[j]));
3237 /* Load the Data area. */
3238 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
3242 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
3243 bnx2_reg_wr_ind(bp, offset, fw->data[j]);
3247 /* Load the SBSS area. */
3248 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
3252 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
3253 bnx2_reg_wr_ind(bp, offset, 0);
3257 /* Load the BSS area. */
3258 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
3262 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
3263 bnx2_reg_wr_ind(bp, offset, 0);
3267 /* Load the Read-Only area. */
3268 offset = cpu_reg->spad_base +
3269 (fw->rodata_addr - cpu_reg->mips_view_base);
3273 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
3274 bnx2_reg_wr_ind(bp, offset, fw->rodata[j]);
3278 /* Clear the pre-fetch instruction. */
3279 bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3280 bnx2_reg_wr_ind(bp, cpu_reg->pc, fw->start_addr);
3282 /* Start the CPU. */
3283 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3284 val &= ~cpu_reg->mode_value_halt;
3285 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3286 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3292 bnx2_init_cpus(struct bnx2 *bp)
3294 struct cpu_reg cpu_reg;
3299 /* Initialize the RV2P processor. */
3300 text = vmalloc(FW_BUF_SIZE);
3303 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3304 rv2p = bnx2_xi_rv2p_proc1;
3305 rv2p_len = sizeof(bnx2_xi_rv2p_proc1);
3307 rv2p = bnx2_rv2p_proc1;
3308 rv2p_len = sizeof(bnx2_rv2p_proc1);
3310 rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3314 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);
3316 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3317 rv2p = bnx2_xi_rv2p_proc2;
3318 rv2p_len = sizeof(bnx2_xi_rv2p_proc2);
3320 rv2p = bnx2_rv2p_proc2;
3321 rv2p_len = sizeof(bnx2_rv2p_proc2);
3323 rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3327 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);
3329 /* Initialize the RX Processor. */
3330 cpu_reg.mode = BNX2_RXP_CPU_MODE;
3331 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
3332 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
3333 cpu_reg.state = BNX2_RXP_CPU_STATE;
3334 cpu_reg.state_value_clear = 0xffffff;
3335 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
3336 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
3337 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
3338 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
3339 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
3340 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
3341 cpu_reg.mips_view_base = 0x8000000;
3343 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3344 fw = &bnx2_rxp_fw_09;
3346 fw = &bnx2_rxp_fw_06;
3349 rc = load_cpu_fw(bp, &cpu_reg, fw);
3353 /* Initialize the TX Processor. */
3354 cpu_reg.mode = BNX2_TXP_CPU_MODE;
3355 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
3356 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
3357 cpu_reg.state = BNX2_TXP_CPU_STATE;
3358 cpu_reg.state_value_clear = 0xffffff;
3359 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
3360 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
3361 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
3362 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
3363 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
3364 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
3365 cpu_reg.mips_view_base = 0x8000000;
3367 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3368 fw = &bnx2_txp_fw_09;
3370 fw = &bnx2_txp_fw_06;
3373 rc = load_cpu_fw(bp, &cpu_reg, fw);
3377 /* Initialize the TX Patch-up Processor. */
3378 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
3379 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
3380 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
3381 cpu_reg.state = BNX2_TPAT_CPU_STATE;
3382 cpu_reg.state_value_clear = 0xffffff;
3383 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
3384 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
3385 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
3386 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
3387 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
3388 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
3389 cpu_reg.mips_view_base = 0x8000000;
3391 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3392 fw = &bnx2_tpat_fw_09;
3394 fw = &bnx2_tpat_fw_06;
3397 rc = load_cpu_fw(bp, &cpu_reg, fw);
3401 /* Initialize the Completion Processor. */
3402 cpu_reg.mode = BNX2_COM_CPU_MODE;
3403 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
3404 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
3405 cpu_reg.state = BNX2_COM_CPU_STATE;
3406 cpu_reg.state_value_clear = 0xffffff;
3407 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
3408 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
3409 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
3410 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
3411 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
3412 cpu_reg.spad_base = BNX2_COM_SCRATCH;
3413 cpu_reg.mips_view_base = 0x8000000;
3415 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3416 fw = &bnx2_com_fw_09;
3418 fw = &bnx2_com_fw_06;
3421 rc = load_cpu_fw(bp, &cpu_reg, fw);
3425 /* Initialize the Command Processor. */
3426 cpu_reg.mode = BNX2_CP_CPU_MODE;
3427 cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
3428 cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
3429 cpu_reg.state = BNX2_CP_CPU_STATE;
3430 cpu_reg.state_value_clear = 0xffffff;
3431 cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
3432 cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
3433 cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
3434 cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
3435 cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
3436 cpu_reg.spad_base = BNX2_CP_SCRATCH;
3437 cpu_reg.mips_view_base = 0x8000000;
3439 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3440 fw = &bnx2_cp_fw_09;
3442 fw = &bnx2_cp_fw_06;
3445 rc = load_cpu_fw(bp, &cpu_reg, fw);
3453 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3457 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3463 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3464 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3465 PCI_PM_CTRL_PME_STATUS);
3467 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3468 /* delay required during transition out of D3hot */
3471 val = REG_RD(bp, BNX2_EMAC_MODE);
3472 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3473 val &= ~BNX2_EMAC_MODE_MPKT;
3474 REG_WR(bp, BNX2_EMAC_MODE, val);
3476 val = REG_RD(bp, BNX2_RPM_CONFIG);
3477 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3478 REG_WR(bp, BNX2_RPM_CONFIG, val);
3489 autoneg = bp->autoneg;
3490 advertising = bp->advertising;
3492 if (bp->phy_port == PORT_TP) {
3493 bp->autoneg = AUTONEG_SPEED;
3494 bp->advertising = ADVERTISED_10baseT_Half |
3495 ADVERTISED_10baseT_Full |
3496 ADVERTISED_100baseT_Half |
3497 ADVERTISED_100baseT_Full |
3501 spin_lock_bh(&bp->phy_lock);
3502 bnx2_setup_phy(bp, bp->phy_port);
3503 spin_unlock_bh(&bp->phy_lock);
3505 bp->autoneg = autoneg;
3506 bp->advertising = advertising;
3508 bnx2_set_mac_addr(bp);
3510 val = REG_RD(bp, BNX2_EMAC_MODE);
3512 /* Enable port mode. */
3513 val &= ~BNX2_EMAC_MODE_PORT;
3514 val |= BNX2_EMAC_MODE_MPKT_RCVD |
3515 BNX2_EMAC_MODE_ACPI_RCVD |
3516 BNX2_EMAC_MODE_MPKT;
3517 if (bp->phy_port == PORT_TP)
3518 val |= BNX2_EMAC_MODE_PORT_MII;
3520 val |= BNX2_EMAC_MODE_PORT_GMII;
3521 if (bp->line_speed == SPEED_2500)
3522 val |= BNX2_EMAC_MODE_25G_MODE;
3525 REG_WR(bp, BNX2_EMAC_MODE, val);
3527 /* Receive all multicast frames. */
3528 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3529 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3532 REG_WR(bp, BNX2_EMAC_RX_MODE,
3533 BNX2_EMAC_RX_MODE_SORT_MODE);
3535 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3536 BNX2_RPM_SORT_USER0_MC_EN;
3537 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3538 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3539 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3540 BNX2_RPM_SORT_USER0_ENA);
3542 /* Need to enable EMAC and RPM for WOL. */
3543 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3544 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3545 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3546 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3548 val = REG_RD(bp, BNX2_RPM_CONFIG);
3549 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3550 REG_WR(bp, BNX2_RPM_CONFIG, val);
3552 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3555 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3558 if (!(bp->flags & BNX2_FLAG_NO_WOL))
3559 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
3561 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3562 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3563 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3572 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3574 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3577 /* No more memory access after this point until
3578 * device is brought back to D0.
3579 */
3590 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3595 /* Request access to the flash interface. */
3596 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3597 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3598 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3599 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3605 if (j >= NVRAM_TIMEOUT_COUNT)
3612 bnx2_release_nvram_lock(struct bnx2 *bp)
3617 /* Relinquish nvram interface. */
3618 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3620 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3621 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3622 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3628 if (j >= NVRAM_TIMEOUT_COUNT)
3636 bnx2_enable_nvram_write(struct bnx2 *bp)
3640 val = REG_RD(bp, BNX2_MISC_CFG);
3641 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3643 if (bp->flash_info->flags & BNX2_NV_WREN) {
3646 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3647 REG_WR(bp, BNX2_NVM_COMMAND,
3648 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3650 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3653 val = REG_RD(bp, BNX2_NVM_COMMAND);
3654 if (val & BNX2_NVM_COMMAND_DONE)
3658 if (j >= NVRAM_TIMEOUT_COUNT)
3665 bnx2_disable_nvram_write(struct bnx2 *bp)
3669 val = REG_RD(bp, BNX2_MISC_CFG);
3670 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3675 bnx2_enable_nvram_access(struct bnx2 *bp)
3679 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3680 /* Enable both bits, even on read. */
3681 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3682 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3686 bnx2_disable_nvram_access(struct bnx2 *bp)
3690 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3691 /* Disable both bits, even after read. */
3692 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3693 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3694 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3698 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3703 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
3704 /* Buffered flash, no erase needed */
3707 /* Build an erase command */
3708 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3709 BNX2_NVM_COMMAND_DOIT;
3711 /* Need to clear DONE bit separately. */
3712 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3714 /* Address of the NVRAM page to erase. */
3715 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3717 /* Issue an erase command. */
3718 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3720 /* Wait for completion. */
3721 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3726 val = REG_RD(bp, BNX2_NVM_COMMAND);
3727 if (val & BNX2_NVM_COMMAND_DONE)
3731 if (j >= NVRAM_TIMEOUT_COUNT)
3738 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3743 /* Build the command word. */
3744 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3746 /* Translate the offset into a buffered-flash page address; not needed on the 5709. */
3747 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3748 offset = ((offset / bp->flash_info->page_size) <<
3749 bp->flash_info->page_bits) +
3750 (offset % bp->flash_info->page_size);
3753 /* Need to clear DONE bit separately. */
3754 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3756 /* Address of the NVRAM to read from. */
3757 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3759 /* Issue a read command. */
3760 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3762 /* Wait for completion. */
3763 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3768 val = REG_RD(bp, BNX2_NVM_COMMAND);
3769 if (val & BNX2_NVM_COMMAND_DONE) {
3770 __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
3771 memcpy(ret_val, &v, 4);
3775 if (j >= NVRAM_TIMEOUT_COUNT)
3783 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3789 /* Build the command word. */
3790 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3792 /* Translate the offset into a buffered-flash page address; not needed on the 5709. */
3793 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3794 offset = ((offset / bp->flash_info->page_size) <<
3795 bp->flash_info->page_bits) +
3796 (offset % bp->flash_info->page_size);
3799 /* Need to clear DONE bit separately. */
3800 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3802 memcpy(&val32, val, 4);
3804 /* Write the data. */
3805 REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
3807 /* Address of the NVRAM to write to. */
3808 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3810 /* Issue the write command. */
3811 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3813 /* Wait for completion. */
3814 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3817 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3820 if (j >= NVRAM_TIMEOUT_COUNT)
3827 bnx2_init_nvram(struct bnx2 *bp)
3830 int j, entry_count, rc = 0;
3831 struct flash_spec *flash;
3833 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3834 bp->flash_info = &flash_5709;
3835 goto get_flash_size;
3838 /* Determine the selected interface. */
3839 val = REG_RD(bp, BNX2_NVM_CFG1);
3841 entry_count = ARRAY_SIZE(flash_table);
3843 if (val & 0x40000000) {
3845 /* Flash interface has been reconfigured */
3846 for (j = 0, flash = &flash_table[0]; j < entry_count;
3848 if ((val & FLASH_BACKUP_STRAP_MASK) ==
3849 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
3850 bp->flash_info = flash;
3857 /* Not yet reconfigured */
3859 if (val & (1 << 23))
3860 mask = FLASH_BACKUP_STRAP_MASK;
3862 mask = FLASH_STRAP_MASK;
3864 for (j = 0, flash = &flash_table[0]; j < entry_count;
3867 if ((val & mask) == (flash->strapping & mask)) {
3868 bp->flash_info = flash;
3870 /* Request access to the flash interface. */
3871 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3874 /* Enable access to flash interface */
3875 bnx2_enable_nvram_access(bp);
3877 /* Reconfigure the flash interface */
3878 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
3879 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
3880 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
3881 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
3883 /* Disable access to flash interface */
3884 bnx2_disable_nvram_access(bp);
3885 bnx2_release_nvram_lock(bp);
3890 } /* if (val & 0x40000000) */
3892 if (j == entry_count) {
3893 bp->flash_info = NULL;
3894 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
3899 val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
3900 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
3902 bp->flash_size = val;
3904 bp->flash_size = bp->flash_info->total_size;
3910 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3914 u32 cmd_flags, offset32, len32, extra;
3919 /* Request access to the flash interface. */
3920 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3923 /* Enable access to flash interface */
3924 bnx2_enable_nvram_access(bp);
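/* NVRAM is addressed one 32-bit word at a time, so an unaligned head
 * or tail is read into a 4-byte bounce buffer and only the requested
 * bytes are copied out.
 */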
3937 pre_len = 4 - (offset & 3);
3939 if (pre_len >= len32) {
3941 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3942 BNX2_NVM_COMMAND_LAST;
3945 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3948 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3953 memcpy(ret_buf, buf + (offset & 3), pre_len);
3960 extra = 4 - (len32 & 3);
3961 len32 = (len32 + 4) & ~3;
3968 cmd_flags = BNX2_NVM_COMMAND_LAST;
3970 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3971 BNX2_NVM_COMMAND_LAST;
3973 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3975 memcpy(ret_buf, buf, 4 - extra);
3977 else if (len32 > 0) {
3980 /* Read the first word. */
3984 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3986 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3988 /* Advance to the next dword. */
3993 while (len32 > 4 && rc == 0) {
3994 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3996 /* Advance to the next dword. */
4005 cmd_flags = BNX2_NVM_COMMAND_LAST;
4006 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4008 memcpy(ret_buf, buf, 4 - extra);
4011 /* Disable access to flash interface */
4012 bnx2_disable_nvram_access(bp);
4014 bnx2_release_nvram_lock(bp);
4020 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4023 u32 written, offset32, len32;
4024 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4026 int align_start, align_end;
4031 align_start = align_end = 0;
4033 if ((align_start = (offset32 & 3))) {
4035 len32 += align_start;
4038 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4043 align_end = 4 - (len32 & 3);
4045 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4049 if (align_start || align_end) {
4050 align_buf = kmalloc(len32, GFP_KERNEL);
4051 if (align_buf == NULL)
4054 memcpy(align_buf, start, 4);
4057 memcpy(align_buf + len32 - 4, end, 4);
4059 memcpy(align_buf + align_start, data_buf, buf_size);
4063 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4064 flash_buffer = kmalloc(264, GFP_KERNEL);
4065 if (flash_buffer == NULL) {
4067 goto nvram_write_end;
4072 while ((written < len32) && (rc == 0)) {
4073 u32 page_start, page_end, data_start, data_end;
4074 u32 addr, cmd_flags;
4077 /* Find the page_start addr */
4078 page_start = offset32 + written;
4079 page_start -= (page_start % bp->flash_info->page_size);
4080 /* Find the page_end addr */
4081 page_end = page_start + bp->flash_info->page_size;
4082 /* Find the data_start addr */
4083 data_start = (written == 0) ? offset32 : page_start;
4084 /* Find the data_end addr */
4085 data_end = (page_end > offset32 + len32) ?
4086 (offset32 + len32) : page_end;
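/* A worked example, assuming a 256-byte flash page: writing 8 bytes
 * at offset 0x104 gives page_start = 0x100, page_end = 0x200,
 * data_start = 0x104 and data_end = 0x10c, so the old bytes on
 * either side of the new data are preserved.
 */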
4088 /* Request access to the flash interface. */
4089 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4090 goto nvram_write_end;
4092 /* Enable access to flash interface */
4093 bnx2_enable_nvram_access(bp);
4095 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4096 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4099 /* Read the whole page into the buffer
4100 * (non-buffered flash only) */
4101 for (j = 0; j < bp->flash_info->page_size; j += 4) {
4102 if (j == (bp->flash_info->page_size - 4)) {
4103 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4105 rc = bnx2_nvram_read_dword(bp,
4111 goto nvram_write_end;
4117 /* Enable writes to flash interface (unlock write-protect) */
4118 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4119 goto nvram_write_end;
4121 /* Loop to write back the buffer data from page_start to
4122 * data_start */
4123 i = 0;
4124 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4125 /* Erase the page */
4126 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4127 goto nvram_write_end;
4129 /* Re-enable the write again for the actual write */
4130 bnx2_enable_nvram_write(bp);
4132 for (addr = page_start; addr < data_start;
4133 addr += 4, i += 4) {
4135 rc = bnx2_nvram_write_dword(bp, addr,
4136 &flash_buffer[i], cmd_flags);
4139 goto nvram_write_end;
4145 /* Loop to write the new data from data_start to data_end */
4146 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4147 if ((addr == page_end - 4) ||
4148 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4149 (addr == data_end - 4))) {
4151 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4153 rc = bnx2_nvram_write_dword(bp, addr, buf,
4157 goto nvram_write_end;
4163 /* Loop to write back the buffer data from data_end
4164 * to page_end */
4165 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4166 for (addr = data_end; addr < page_end;
4167 addr += 4, i += 4) {
4169 if (addr == page_end-4) {
4170 cmd_flags = BNX2_NVM_COMMAND_LAST;
4172 rc = bnx2_nvram_write_dword(bp, addr,
4173 &flash_buffer[i], cmd_flags);
4176 goto nvram_write_end;
4182 /* Disable writes to flash interface (lock write-protect) */
4183 bnx2_disable_nvram_write(bp);
4185 /* Disable access to flash interface */
4186 bnx2_disable_nvram_access(bp);
4187 bnx2_release_nvram_lock(bp);
4189 /* Increment written */
4190 written += data_end - data_start;
4194 kfree(flash_buffer);
4200 bnx2_init_remote_phy(struct bnx2 *bp)
4204 bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4205 if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES))
4208 val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4209 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4212 if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
4213 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4215 val = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4216 if (val & BNX2_LINK_STATUS_SERDES_LINK)
4217 bp->phy_port = PORT_FIBRE;
4219 bp->phy_port = PORT_TP;
4221 if (netif_running(bp->dev)) {
4224 sig = BNX2_DRV_ACK_CAP_SIGNATURE |
4225 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4226 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
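/* Point two of the GRC windows at the MSI-X vector table and PBA so
 * both stay reachable through normal register access.
 */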
4232 bnx2_setup_msix_tbl(struct bnx2 *bp)
4234 REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4236 REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4237 REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4241 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4247 /* Wait for the current PCI transaction to complete before
4248 * issuing a reset. */
4249 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4250 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4251 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4252 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4253 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4254 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4257 /* Wait for the firmware to tell us it is ok to issue a reset. */
4258 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
4260 /* Deposit a driver reset signature so the firmware knows that
4261 * this is a soft reset. */
4262 bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4263 BNX2_DRV_RESET_SIGNATURE_MAGIC);
4265 /* Do a dummy read to force the chip to complete all current
4266 * transactions before we issue a reset. */
4267 val = REG_RD(bp, BNX2_MISC_ID);
4269 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4270 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4271 REG_RD(bp, BNX2_MISC_COMMAND);
4274 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4275 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4277 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
4280 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4281 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4282 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4285 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4287 /* Reading back any register after chip reset will hang the
4288 * bus on 5706 A0 and A1. The msleep below provides plenty
4289 * of margin for write posting.
4290 */
4291 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
4292 (CHIP_ID(bp) == CHIP_ID_5706_A1))
4295 /* Reset takes approximately 30 usec */
4296 for (i = 0; i < 10; i++) {
4297 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4298 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4299 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4304 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4305 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4306 printk(KERN_ERR PFX "Chip reset did not complete\n");
4311 /* Make sure byte swapping is properly configured. */
4312 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
4313 if (val != 0x01020304) {
4314 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
4318 /* Wait for the firmware to finish its initialization. */
4319 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
4323 spin_lock_bh(&bp->phy_lock);
4324 old_port = bp->phy_port;
4325 bnx2_init_remote_phy(bp);
4326 if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4327 old_port != bp->phy_port)
4328 bnx2_set_default_remote_link(bp);
4329 spin_unlock_bh(&bp->phy_lock);
4331 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4332 /* Adjust the voltage regulator two steps lower.  The default
4333 * of this register is 0x0000000e. */
4334 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4336 /* Remove bad rbuf memory from the free pool. */
4337 rc = bnx2_alloc_bad_rbuf(bp);
4340 if (bp->flags & BNX2_FLAG_USING_MSIX)
4341 bnx2_setup_msix_tbl(bp);
4347 bnx2_init_chip(struct bnx2 *bp)
4352 /* Make sure the interrupt is not active. */
4353 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4355 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4356 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4358 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4360 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4361 DMA_READ_CHANS << 12 |
4362 DMA_WRITE_CHANS << 16;
4364 val |= (0x2 << 20) | (1 << 11);
4366 if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4369 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4370 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
4371 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4373 REG_WR(bp, BNX2_DMA_CONFIG, val);
4375 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4376 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4377 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4378 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4381 if (bp->flags & BNX2_FLAG_PCIX) {
4384 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4386 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4387 val16 & ~PCI_X_CMD_ERO);
4390 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4391 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4392 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4393 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4395 /* Initialize context mapping and zero out the quick contexts. The
4396 * context block must have already been enabled. */
4397 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4398 rc = bnx2_init_5709_context(bp);
4402 bnx2_init_context(bp);
4404 if ((rc = bnx2_init_cpus(bp)) != 0)
4407 bnx2_init_nvram(bp);
4409 bnx2_set_mac_addr(bp);
4411 val = REG_RD(bp, BNX2_MQ_CONFIG);
4412 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4413 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4414 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
4415 val |= BNX2_MQ_CONFIG_HALT_DIS;
4417 REG_WR(bp, BNX2_MQ_CONFIG, val);
4419 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4420 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4421 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4423 val = (BCM_PAGE_BITS - 8) << 24;
4424 REG_WR(bp, BNX2_RV2P_CONFIG, val);
4426 /* Configure page size. */
4427 val = REG_RD(bp, BNX2_TBDR_CONFIG);
4428 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4429 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4430 REG_WR(bp, BNX2_TBDR_CONFIG, val);
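/* Seed the transmit backoff generator from the station address so
 * that different NICs pick different backoff slots.
 */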
4432 val = bp->mac_addr[0] +
4433 (bp->mac_addr[1] << 8) +
4434 (bp->mac_addr[2] << 16) +
4435 bp->mac_addr[3] +
4436 (bp->mac_addr[4] << 8) +
4437 (bp->mac_addr[5] << 16);
4438 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4440 /* Program the MTU. Also include 4 bytes for CRC32. */
4441 val = bp->dev->mtu + ETH_HLEN + 4;
4442 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4443 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4444 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4446 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4447 bp->bnx2_napi[i].last_status_idx = 0;
4449 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4451 /* Set up how to generate a link change interrupt. */
4452 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4454 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4455 (u64) bp->status_blk_mapping & 0xffffffff);
4456 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4458 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4459 (u64) bp->stats_blk_mapping & 0xffffffff);
4460 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4461 (u64) bp->stats_blk_mapping >> 32);
4463 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4464 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4466 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4467 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4469 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4470 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4472 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4474 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4476 REG_WR(bp, BNX2_HC_COM_TICKS,
4477 (bp->com_ticks_int << 16) | bp->com_ticks);
4479 REG_WR(bp, BNX2_HC_CMD_TICKS,
4480 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4482 if (CHIP_NUM(bp) == CHIP_NUM_5708)
4483 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4485 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4486 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4488 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4489 val = BNX2_HC_CONFIG_COLLECT_STATS;
4491 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4492 BNX2_HC_CONFIG_COLLECT_STATS;
4495 if (bp->flags & BNX2_FLAG_USING_MSIX) {
4496 u32 base = ((BNX2_TX_VEC - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4497 BNX2_HC_SB_CONFIG_1;
4499 REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
4500 BNX2_HC_MSIX_BIT_VECTOR_VAL);
4503 BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
4504 BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4506 REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
4507 (bp->tx_quick_cons_trip_int << 16) |
4508 bp->tx_quick_cons_trip);
4510 REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
4511 (bp->tx_ticks_int << 16) | bp->tx_ticks);
4513 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
4516 if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
4517 val |= BNX2_HC_CONFIG_ONE_SHOT;
4519 REG_WR(bp, BNX2_HC_CONFIG, val);
4521 /* Clear internal stats counters. */
4522 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4524 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
4526 /* Initialize the receive filter. */
4527 bnx2_set_rx_mode(bp->dev);
4529 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4530 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4531 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4532 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4534 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
4537 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
4538 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
4542 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
4548 bnx2_clear_ring_states(struct bnx2 *bp)
4550 struct bnx2_napi *bnapi;
4553 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
4554 bnapi = &bp->bnx2_napi[i];
4557 bnapi->hw_tx_cons = 0;
4558 bnapi->rx_prod_bseq = 0;
4561 bnapi->rx_pg_prod = 0;
4562 bnapi->rx_pg_cons = 0;
4567 bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
4569 u32 val, offset0, offset1, offset2, offset3;
4570 u32 cid_addr = GET_CID_ADDR(cid);
4572 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4573 offset0 = BNX2_L2CTX_TYPE_XI;
4574 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4575 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4576 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4578 offset0 = BNX2_L2CTX_TYPE;
4579 offset1 = BNX2_L2CTX_CMD_TYPE;
4580 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4581 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4583 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4584 bnx2_ctx_wr(bp, cid_addr, offset0, val);
4586 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4587 bnx2_ctx_wr(bp, cid_addr, offset1, val);
4589 val = (u64) bp->tx_desc_mapping >> 32;
4590 bnx2_ctx_wr(bp, cid_addr, offset2, val);
4592 val = (u64) bp->tx_desc_mapping & 0xffffffff;
4593 bnx2_ctx_wr(bp, cid_addr, offset3, val);
4597 bnx2_init_tx_ring(struct bnx2 *bp)
4601 struct bnx2_napi *bnapi;
4604 if (bp->flags & BNX2_FLAG_USING_MSIX) {
4606 bp->tx_vec = BNX2_TX_VEC;
4607 REG_WR(bp, BNX2_TSCH_TSS_CFG, BNX2_TX_INT_NUM |
4610 bnapi = &bp->bnx2_napi[bp->tx_vec];
4612 bp->tx_wake_thresh = bp->tx_ring_size / 2;
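/* The last BD of the ring is a chain BD whose address points back to
 * the start of the ring, closing the loop for the hardware.
 */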
4614 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
4616 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
4617 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
4620 bp->tx_prod_bseq = 0;
4622 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4623 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4625 bnx2_init_tx_context(bp, cid);
4629 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4635 for (i = 0; i < num_rings; i++) {
4638 rxbd = &rx_ring[i][0];
4639 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4640 rxbd->rx_bd_len = buf_size;
4641 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4643 if (i == (num_rings - 1))
4644 j = 0; /* chain BD wraps to the first ring page */
4645 else
4646 j = i + 1;
4647 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4648 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
4653 bnx2_init_rx_ring(struct bnx2 *bp)
4656 u16 prod, ring_prod;
4657 u32 val, rx_cid_addr = GET_CID_ADDR(RX_CID);
4658 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
4660 bnx2_init_rxbd_rings(bp->rx_desc_ring, bp->rx_desc_mapping,
4661 bp->rx_buf_use_size, bp->rx_max_ring);
4663 bnx2_init_rx_context0(bp);
4665 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4666 val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
4667 REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
4670 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
4671 if (bp->rx_pg_ring_size) {
4672 bnx2_init_rxbd_rings(bp->rx_pg_desc_ring,
4673 bp->rx_pg_desc_mapping,
4674 PAGE_SIZE, bp->rx_max_pg_ring);
4675 val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
4676 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
4677 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
4678 BNX2_L2CTX_RBDC_JUMBO_KEY);
4680 val = (u64) bp->rx_pg_desc_mapping[0] >> 32;
4681 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
4683 val = (u64) bp->rx_pg_desc_mapping[0] & 0xffffffff;
4684 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
4686 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4687 REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
4690 val = (u64) bp->rx_desc_mapping[0] >> 32;
4691 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
4693 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
4694 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
4696 ring_prod = prod = bnapi->rx_pg_prod;
4697 for (i = 0; i < bp->rx_pg_ring_size; i++) {
4698 if (bnx2_alloc_rx_page(bp, ring_prod) < 0)
4700 prod = NEXT_RX_BD(prod);
4701 ring_prod = RX_PG_RING_IDX(prod);
4703 bnapi->rx_pg_prod = prod;
4705 ring_prod = prod = bnapi->rx_prod;
4706 for (i = 0; i < bp->rx_ring_size; i++) {
4707 if (bnx2_alloc_rx_skb(bp, bnapi, ring_prod) < 0) {
4710 prod = NEXT_RX_BD(prod);
4711 ring_prod = RX_RING_IDX(prod);
4713 bnapi->rx_prod = prod;
4715 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
4717 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
4719 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bnapi->rx_prod_bseq);
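/* Convert a ring size in BDs to the number of ring pages needed,
 * rounded up to a power of two and capped at max_size.  For example,
 * assuming 255 usable BDs per page, 300 BDs need 2 pages while 600
 * BDs round up from 3 to 4.
 */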
4722 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
4724 u32 max, num_rings = 1;
4726 while (ring_size > MAX_RX_DESC_CNT) {
4727 ring_size -= MAX_RX_DESC_CNT;
4728 num_rings++;
4729 }
4730 /* round to next power of 2 */
4731 max = max_size;
4732 while ((max & num_rings) == 0)
4733 max >>= 1;
4735 if (num_rings != max)
4736 max <<= 1;
4738 return max;
4742 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4744 u32 rx_size, rx_space, jumbo_size;
4746 /* 8 for CRC and VLAN */
4747 rx_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
4749 rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
4750 sizeof(struct skb_shared_info);
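/* rx_space is the true cost of one receive skb: the buffer itself
 * plus alignment slack, NET_SKB_PAD headroom and the trailing
 * skb_shared_info.
 */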
4752 bp->rx_copy_thresh = RX_COPY_THRESH;
4753 bp->rx_pg_ring_size = 0;
4754 bp->rx_max_pg_ring = 0;
4755 bp->rx_max_pg_ring_idx = 0;
4756 if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
4757 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
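/* The 40-byte deduction roughly approximates a TCP/IP header kept in
 * the linear part; the result only sizes the page ring, so the
 * estimate need not be exact.
 */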
4759 jumbo_size = size * pages;
4760 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
4761 jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
4763 bp->rx_pg_ring_size = jumbo_size;
4764 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
4766 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
4767 rx_size = RX_COPY_THRESH + bp->rx_offset;
4768 bp->rx_copy_thresh = 0;
4771 bp->rx_buf_use_size = rx_size;
4773 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
4774 bp->rx_jumbo_thresh = rx_size - bp->rx_offset;
4775 bp->rx_ring_size = size;
4776 bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
4777 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
4781 bnx2_free_tx_skbs(struct bnx2 *bp)
4785 if (bp->tx_buf_ring == NULL)
4788 for (i = 0; i < TX_DESC_CNT; ) {
4789 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
4790 struct sk_buff *skb = tx_buf->skb;
4798 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
4799 skb_headlen(skb), PCI_DMA_TODEVICE);
4803 last = skb_shinfo(skb)->nr_frags;
4804 for (j = 0; j < last; j++) {
4805 tx_buf = &bp->tx_buf_ring[i + j + 1];
4806 pci_unmap_page(bp->pdev,
4807 pci_unmap_addr(tx_buf, mapping),
4808 skb_shinfo(skb)->frags[j].size,
4818 bnx2_free_rx_skbs(struct bnx2 *bp)
4822 if (bp->rx_buf_ring == NULL)
4825 for (i = 0; i < bp->rx_max_ring_idx; i++) {
4826 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
4827 struct sk_buff *skb = rx_buf->skb;
4832 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4833 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4839 for (i = 0; i < bp->rx_max_pg_ring_idx; i++)
4840 bnx2_free_rx_page(bp, i);
4844 bnx2_free_skbs(struct bnx2 *bp)
4846 bnx2_free_tx_skbs(bp);
4847 bnx2_free_rx_skbs(bp);
4851 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4855 rc = bnx2_reset_chip(bp, reset_code);
4860 if ((rc = bnx2_init_chip(bp)) != 0)
4863 bnx2_clear_ring_states(bp);
4864 bnx2_init_tx_ring(bp);
4865 bnx2_init_rx_ring(bp);
4870 bnx2_init_nic(struct bnx2 *bp)
4874 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4877 spin_lock_bh(&bp->phy_lock);
4880 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
4881 bnx2_remote_phy_event(bp);
4882 spin_unlock_bh(&bp->phy_lock);
4887 bnx2_test_registers(struct bnx2 *bp)
4891 static const struct {
4894 #define BNX2_FL_NOT_5709 1
4898 { 0x006c, 0, 0x00000000, 0x0000003f },
4899 { 0x0090, 0, 0xffffffff, 0x00000000 },
4900 { 0x0094, 0, 0x00000000, 0x00000000 },
4902 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
4903 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4904 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4905 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
4906 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
4907 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4908 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
4909 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4910 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4912 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4913 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4914 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4915 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4916 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4917 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4919 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4920 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
4921 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
4923 { 0x1000, 0, 0x00000000, 0x00000001 },
4924 { 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
4926 { 0x1408, 0, 0x01c00800, 0x00000000 },
4927 { 0x149c, 0, 0x8000ffff, 0x00000000 },
4928 { 0x14a8, 0, 0x00000000, 0x000001ff },
4929 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
4930 { 0x14b0, 0, 0x00000002, 0x00000001 },
4931 { 0x14b8, 0, 0x00000000, 0x00000000 },
4932 { 0x14c0, 0, 0x00000000, 0x00000009 },
4933 { 0x14c4, 0, 0x00003fff, 0x00000000 },
4934 { 0x14cc, 0, 0x00000000, 0x00000001 },
4935 { 0x14d0, 0, 0xffffffff, 0x00000000 },
4937 { 0x1800, 0, 0x00000000, 0x00000001 },
4938 { 0x1804, 0, 0x00000000, 0x00000003 },
4940 { 0x2800, 0, 0x00000000, 0x00000001 },
4941 { 0x2804, 0, 0x00000000, 0x00003f01 },
4942 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
4943 { 0x2810, 0, 0xffff0000, 0x00000000 },
4944 { 0x2814, 0, 0xffff0000, 0x00000000 },
4945 { 0x2818, 0, 0xffff0000, 0x00000000 },
4946 { 0x281c, 0, 0xffff0000, 0x00000000 },
4947 { 0x2834, 0, 0xffffffff, 0x00000000 },
4948 { 0x2840, 0, 0x00000000, 0xffffffff },
4949 { 0x2844, 0, 0x00000000, 0xffffffff },
4950 { 0x2848, 0, 0xffffffff, 0x00000000 },
4951 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
4953 { 0x2c00, 0, 0x00000000, 0x00000011 },
4954 { 0x2c04, 0, 0x00000000, 0x00030007 },
4956 { 0x3c00, 0, 0x00000000, 0x00000001 },
4957 { 0x3c04, 0, 0x00000000, 0x00070000 },
4958 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
4959 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
4960 { 0x3c10, 0, 0xffffffff, 0x00000000 },
4961 { 0x3c14, 0, 0x00000000, 0xffffffff },
4962 { 0x3c18, 0, 0x00000000, 0xffffffff },
4963 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
4964 { 0x3c20, 0, 0xffffff00, 0x00000000 },
4966 { 0x5004, 0, 0x00000000, 0x0000007f },
4967 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
4969 { 0x5c00, 0, 0x00000000, 0x00000001 },
4970 { 0x5c04, 0, 0x00000000, 0x0003000f },
4971 { 0x5c08, 0, 0x00000003, 0x00000000 },
4972 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
4973 { 0x5c10, 0, 0x00000000, 0xffffffff },
4974 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
4975 { 0x5c84, 0, 0x00000000, 0x0000f333 },
4976 { 0x5c88, 0, 0x00000000, 0x00077373 },
4977 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
4979 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
4980 { 0x680c, 0, 0xffffffff, 0x00000000 },
4981 { 0x6810, 0, 0xffffffff, 0x00000000 },
4982 { 0x6814, 0, 0xffffffff, 0x00000000 },
4983 { 0x6818, 0, 0xffffffff, 0x00000000 },
4984 { 0x681c, 0, 0xffffffff, 0x00000000 },
4985 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
4986 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
4987 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
4988 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
4989 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
4990 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
4991 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
4992 { 0x683c, 0, 0x0000ffff, 0x00000000 },
4993 { 0x6840, 0, 0x00000ff0, 0x00000000 },
4994 { 0x6844, 0, 0x00ffff00, 0x00000000 },
4995 { 0x684c, 0, 0xffffffff, 0x00000000 },
4996 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
4997 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
4998 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
4999 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5000 { 0x6908, 0, 0x00000000, 0x0001ff0f },
5001 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5003 { 0xffff, 0, 0x00000000, 0x00000000 },
5008 if (CHIP_NUM(bp) == CHIP_NUM_5709) is_5709 = 1;
5011 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5012 u32 offset, rw_mask, ro_mask, save_val, val;
5013 u16 flags = reg_tbl[i].flags;
5015 if (is_5709 && (flags & BNX2_FL_NOT_5709)) continue;
5018 offset = (u32) reg_tbl[i].offset;
5019 rw_mask = reg_tbl[i].rw_mask;
5020 ro_mask = reg_tbl[i].ro_mask;
5022 save_val = readl(bp->regview + offset);
5024 writel(0, bp->regview + offset);
5026 val = readl(bp->regview + offset);
5027 if ((val & rw_mask) != 0) goto reg_test_err;
5031 if ((val & ro_mask) != (save_val & ro_mask)) goto reg_test_err;
5035 writel(0xffffffff, bp->regview + offset);
5037 val = readl(bp->regview + offset);
5038 if ((val & rw_mask) != rw_mask) goto reg_test_err;
5042 if ((val & ro_mask) != (save_val & ro_mask)) goto reg_test_err;
5046 writel(save_val, bp->regview + offset);
5050 writel(save_val, bp->regview + offset);
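/* The loop above tests each register twice: it writes all-zeros and
 * then all-ones, and after each write checks that (a) the read-write
 * bits (rw_mask) took the value written and (b) the read-only bits
 * (ro_mask) kept the value saved in save_val.  For example, for the
 * entry { 0x0c08, 0, 0x0f0ff073, 0x00000000 }, a write of 0xffffffff
 * must read back with all bits of 0x0f0ff073 set, and a write of 0
 * must read back with all of them clear.  save_val is restored on both
 * the success and failure paths, so the test is non-destructive.
 */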
5058 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5060 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5061 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5064 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5067 for (offset = 0; offset < size; offset += 4) {
5069 bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5071 if (bnx2_reg_rd_ind(bp, start + offset) != test_pattern[i]) return -ENODEV;
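/* bnx2_do_mem_test() walks every pattern through the whole window:
 * each 32-bit word in [start, start + size) is written through the
 * indirect register interface and immediately read back; any mismatch
 * fails the test.  The patterns (all-0s, all-1s, alternating 0x55/0xaa
 * variants) are the usual choices for catching stuck and cross-coupled
 * bits.
 */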
5081 bnx2_test_memory(struct bnx2 *bp)
5085 static struct mem_entry {
5088 } mem_tbl_5706[] = {
5089 { 0x60000, 0x4000 },
5090 { 0xa0000, 0x3000 },
5091 { 0xe0000, 0x4000 },
5092 { 0x120000, 0x4000 },
5093 { 0x1a0000, 0x4000 },
5094 { 0x160000, 0x4000 },
5098 { 0x60000, 0x4000 },
5099 { 0xa0000, 0x3000 },
5100 { 0xe0000, 0x4000 },
5101 { 0x120000, 0x4000 },
5102 { 0x1a0000, 0x4000 },
5105 struct mem_entry *mem_tbl;
5107 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5108 mem_tbl = mem_tbl_5709;
5110 else mem_tbl = mem_tbl_5706;
5112 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5113 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5114 mem_tbl[i].len)) != 0) {
5122 #define BNX2_MAC_LOOPBACK 0
5123 #define BNX2_PHY_LOOPBACK 1
5126 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5128 unsigned int pkt_size, num_pkts, i;
5129 struct sk_buff *skb, *rx_skb;
5130 unsigned char *packet;
5131 u16 rx_start_idx, rx_idx;
5134 struct sw_bd *rx_buf;
5135 struct l2_fhdr *rx_hdr;
5137 struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5140 if (bp->flags & BNX2_FLAG_USING_MSIX)
5141 tx_napi = &bp->bnx2_napi[BNX2_TX_VEC];
5143 if (loopback_mode == BNX2_MAC_LOOPBACK) {
5144 bp->loopback = MAC_LOOPBACK;
5145 bnx2_set_mac_loopback(bp);
5147 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5148 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5151 bp->loopback = PHY_LOOPBACK;
5152 bnx2_set_phy_loopback(bp);
5157 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5158 skb = netdev_alloc_skb(bp->dev, pkt_size);
5161 packet = skb_put(skb, pkt_size);
5162 memcpy(packet, bp->dev->dev_addr, 6);
5163 memset(packet + 6, 0x0, 8);
5164 for (i = 14; i < pkt_size; i++)
5165 packet[i] = (unsigned char) (i & 0xff);
5167 map = pci_map_single(bp->pdev, skb->data, pkt_size,
5170 REG_WR(bp, BNX2_HC_COMMAND,
5171 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5173 REG_RD(bp, BNX2_HC_COMMAND);
5176 rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5180 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
5182 txbd->tx_bd_haddr_hi = (u64) map >> 32;
5183 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5184 txbd->tx_bd_mss_nbytes = pkt_size;
5185 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5188 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
5189 bp->tx_prod_bseq += pkt_size;
5191 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
5192 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
5196 REG_WR(bp, BNX2_HC_COMMAND,
5197 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5199 REG_RD(bp, BNX2_HC_COMMAND);
5203 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
5206 if (bnx2_get_hw_tx_cons(tx_napi) != bp->tx_prod)
5207 goto loopback_test_done;
5209 rx_idx = bnx2_get_hw_rx_cons(bnapi);
5210 if (rx_idx != rx_start_idx + num_pkts) {
5211 goto loopback_test_done;
5214 rx_buf = &bp->rx_buf_ring[rx_start_idx];
5215 rx_skb = rx_buf->skb;
5217 rx_hdr = (struct l2_fhdr *) rx_skb->data;
5218 skb_reserve(rx_skb, bp->rx_offset);
5220 pci_dma_sync_single_for_cpu(bp->pdev,
5221 pci_unmap_addr(rx_buf, mapping),
5222 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
5224 if (rx_hdr->l2_fhdr_status &
5225 (L2_FHDR_ERRORS_BAD_CRC |
5226 L2_FHDR_ERRORS_PHY_DECODE |
5227 L2_FHDR_ERRORS_ALIGNMENT |
5228 L2_FHDR_ERRORS_TOO_SHORT |
5229 L2_FHDR_ERRORS_GIANT_FRAME)) {
5231 goto loopback_test_done;
5234 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5235 goto loopback_test_done;
5238 for (i = 14; i < pkt_size; i++) {
5239 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
5240 goto loopback_test_done;
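/* A loopback pass therefore requires, in order: the TX consumer index
 * catching up to tx_prod, exactly num_pkts new RX completions, a clean
 * l2_fhdr error status, the expected length (pkt_size plus the 4-byte
 * CRC subtracted above), and a byte-exact match of the payload against
 * the (i & 0xff) fill pattern written at transmit time.  Any early
 * goto reports failure.
 */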
5251 #define BNX2_MAC_LOOPBACK_FAILED 1
5252 #define BNX2_PHY_LOOPBACK_FAILED 2
5253 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
5254 BNX2_PHY_LOOPBACK_FAILED)
5257 bnx2_test_loopback(struct bnx2 *bp)
5261 if (!netif_running(bp->dev))
5262 return BNX2_LOOPBACK_FAILED;
5264 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5265 spin_lock_bh(&bp->phy_lock);
5267 spin_unlock_bh(&bp->phy_lock);
5268 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5269 rc |= BNX2_MAC_LOOPBACK_FAILED;
5270 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5271 rc |= BNX2_PHY_LOOPBACK_FAILED;
5275 #define NVRAM_SIZE 0x200
5276 #define CRC32_RESIDUAL 0xdebb20e3
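/* CRC32_RESIDUAL is the standard CRC-32 residue: when a block of bytes
 * ends with the (complemented, little-endian) CRC of everything before
 * it, ether_crc_le() computed over the whole block -- data plus stored
 * CRC -- always evaluates to this constant.  bnx2_test_nvram() below
 * exploits that to validate each 0x100-byte block without needing to
 * know where inside the block the checksum field sits.
 */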
5279 bnx2_test_nvram(struct bnx2 *bp)
5281 __be32 buf[NVRAM_SIZE / 4];
5282 u8 *data = (u8 *) buf;
5286 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5287 goto test_nvram_done;
5289 magic = be32_to_cpu(buf[0]);
5290 if (magic != 0x669955aa) {
5292 goto test_nvram_done;
5295 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5296 goto test_nvram_done;
5298 csum = ether_crc_le(0x100, data);
5299 if (csum != CRC32_RESIDUAL) {
5301 goto test_nvram_done;
5304 csum = ether_crc_le(0x100, data + 0x100);
5305 if (csum != CRC32_RESIDUAL) {
5314 bnx2_test_link(struct bnx2 *bp)
5318 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5323 spin_lock_bh(&bp->phy_lock);
5324 bnx2_enable_bmsr1(bp);
5325 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5326 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5327 bnx2_disable_bmsr1(bp);
5328 spin_unlock_bh(&bp->phy_lock);
5330 if (bmsr & BMSR_LSTATUS) {
5337 bnx2_test_intr(struct bnx2 *bp)
5342 if (!netif_running(bp->dev)) return -ENODEV;
5345 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5347 /* This register is not touched during run-time. */
5348 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5349 REG_RD(bp, BNX2_HC_COMMAND);
5351 for (i = 0; i < 10; i++) {
5352 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5358 msleep_interruptible(10);
5366 /* Determine link state for parallel detection. */
5368 bnx2_5706_serdes_has_link(struct bnx2 *bp)
5370 u32 mode_ctl, an_dbg, exp;
5372 if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL) return 0;
5375 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
5376 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
5378 if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET)) return 0;
5381 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5382 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5383 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5385 if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID)) return 0;
5388 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
5389 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5390 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5392 if (exp & MII_EXPAND_REG1_RUDI_C) /* receiving CONFIG */
return 0;
return 1;
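/* Parallel-detection summary (as implemented above): when the link
 * partner does not autonegotiate, a 1000Base-X link can still be
 * inferred from raw SERDES state.  The checks require, in order, a
 * valid signal detect, a receiver that is in sync and not seeing
 * invalid RUDI, and no incoming CONFIG (autonegotiation) code words;
 * only then is the partner assumed to be a forced-1000 device and 1
 * returned, so the caller can force the local BMCR to match.
 */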
5399 bnx2_5706_serdes_timer(struct bnx2 *bp)
5403 spin_lock(&bp->phy_lock);
5404 if (bp->serdes_an_pending) {
5405 bp->serdes_an_pending--;
5407 } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5410 bp->current_interval = bp->timer_interval;
5412 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5414 if (bmcr & BMCR_ANENABLE) {
5415 if (bnx2_5706_serdes_has_link(bp)) {
5416 bmcr &= ~BMCR_ANENABLE;
5417 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5418 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5419 bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
5423 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
5424 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
5427 bnx2_write_phy(bp, 0x17, 0x0f01);
5428 bnx2_read_phy(bp, 0x15, &phy2);
5432 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5433 bmcr |= BMCR_ANENABLE;
5434 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5436 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
5439 bp->current_interval = bp->timer_interval;
5444 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5445 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
5446 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
5448 if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
5449 if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
5450 bnx2_5706s_force_link_dn(bp, 1);
5451 bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
5454 } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
5457 spin_unlock(&bp->phy_lock);
5461 bnx2_5708_serdes_timer(struct bnx2 *bp)
5463 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) return;
5466 if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
5467 bp->serdes_an_pending = 0;
5471 spin_lock(&bp->phy_lock);
5472 if (bp->serdes_an_pending)
5473 bp->serdes_an_pending--;
5474 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5477 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5478 if (bmcr & BMCR_ANENABLE) {
5479 bnx2_enable_forced_2g5(bp);
5480 bp->current_interval = SERDES_FORCED_TIMEOUT;
5482 bnx2_disable_forced_2g5(bp);
5483 bp->serdes_an_pending = 2;
5484 bp->current_interval = bp->timer_interval;
5488 bp->current_interval = bp->timer_interval;
5490 spin_unlock(&bp->phy_lock);
5494 bnx2_timer(unsigned long data)
5496 struct bnx2 *bp = (struct bnx2 *) data;
5498 if (!netif_running(bp->dev)) return;
5501 if (atomic_read(&bp->intr_sem) != 0)
5502 goto bnx2_restart_timer;
5504 bnx2_send_heart_beat(bp);
5506 bp->stats_blk->stat_FwRxDrop =
5507 bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
5509 /* Work around occasionally corrupted counters. */
5510 if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
5511 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
5512 BNX2_HC_COMMAND_STATS_NOW);
5514 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
5515 if (CHIP_NUM(bp) == CHIP_NUM_5706)
5516 bnx2_5706_serdes_timer(bp);
5518 bnx2_5708_serdes_timer(bp);
5522 mod_timer(&bp->timer, jiffies + bp->current_interval);
5526 bnx2_request_irq(struct bnx2 *bp)
5528 struct net_device *dev = bp->dev;
5529 unsigned long flags;
5530 struct bnx2_irq *irq;
5533 if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
flags = 0;
else
5536 flags = IRQF_SHARED;
5538 for (i = 0; i < bp->irq_nvecs; i++) {
5539 irq = &bp->irq_tbl[i];
5540 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
5550 bnx2_free_irq(struct bnx2 *bp)
5552 struct net_device *dev = bp->dev;
5553 struct bnx2_irq *irq;
5556 for (i = 0; i < bp->irq_nvecs; i++) {
5557 irq = &bp->irq_tbl[i];
5559 free_irq(irq->vector, dev);
5562 if (bp->flags & BNX2_FLAG_USING_MSI)
5563 pci_disable_msi(bp->pdev);
5564 else if (bp->flags & BNX2_FLAG_USING_MSIX)
5565 pci_disable_msix(bp->pdev);
5567 bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
5571 bnx2_enable_msix(struct bnx2 *bp)
5574 struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
5576 bnx2_setup_msix_tbl(bp);
5577 REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
5578 REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
5579 REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
5581 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5582 msix_ent[i].entry = i;
5583 msix_ent[i].vector = 0;
5586 rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
if (rc != 0)
return;
5590 bp->irq_tbl[BNX2_BASE_VEC].handler = bnx2_msi_1shot;
5591 bp->irq_tbl[BNX2_TX_VEC].handler = bnx2_tx_msix;
5593 strcpy(bp->irq_tbl[BNX2_BASE_VEC].name, bp->dev->name);
5594 strcat(bp->irq_tbl[BNX2_BASE_VEC].name, "-base");
5595 strcpy(bp->irq_tbl[BNX2_TX_VEC].name, bp->dev->name);
5596 strcat(bp->irq_tbl[BNX2_TX_VEC].name, "-tx");
5598 bp->irq_nvecs = BNX2_MAX_MSIX_VEC;
5599 bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
5600 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
5601 bp->irq_tbl[i].vector = msix_ent[i].vector;
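/* MSI-X bring-up order used above: program the chip's MSI-X table and
 * PBA windows, request BNX2_MAX_MSIX_VEC vectors from the PCI core,
 * and only on success commit by filling irq_tbl with per-vector
 * handlers ("-base" for the default ring, "-tx" for the TX-only
 * vector) and setting BNX2_FLAG_USING_MSIX.  If pci_enable_msix()
 * fails, the function returns with no flags set and the caller falls
 * back to MSI or INTx in bnx2_setup_int_mode().
 */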
5605 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
5607 bp->irq_tbl[0].handler = bnx2_interrupt;
5608 strcpy(bp->irq_tbl[0].name, bp->dev->name);
5610 bp->irq_tbl[0].vector = bp->pdev->irq;
5612 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
5613 bnx2_enable_msix(bp);
5615 if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
5616 !(bp->flags & BNX2_FLAG_USING_MSIX)) {
5617 if (pci_enable_msi(bp->pdev) == 0) {
5618 bp->flags |= BNX2_FLAG_USING_MSI;
5619 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5620 bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
5621 bp->irq_tbl[0].handler = bnx2_msi_1shot;
5623 bp->irq_tbl[0].handler = bnx2_msi;
5625 bp->irq_tbl[0].vector = bp->pdev->irq;
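/* Interrupt mode selection above, in descending preference: MSI-X
 * (only when BNX2_FLAG_MSIX_CAP was set at probe time), then plain
 * MSI, then legacy shared INTx via bnx2_interrupt().  On the 5709,
 * MSI is used in one-shot mode (bnx2_msi_1shot), where the handler is
 * expected to explicitly re-enable the interrupt after servicing it.
 */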
5630 /* Called with rtnl_lock */
5632 bnx2_open(struct net_device *dev)
5634 struct bnx2 *bp = netdev_priv(dev);
5637 netif_carrier_off(dev);
5639 bnx2_set_power_state(bp, PCI_D0);
5640 bnx2_disable_int(bp);
5642 rc = bnx2_alloc_mem(bp);
if (rc)
return rc;
5646 bnx2_setup_int_mode(bp, disable_msi);
5647 bnx2_napi_enable(bp);
5648 rc = bnx2_request_irq(bp);
5651 bnx2_napi_disable(bp);
5656 rc = bnx2_init_nic(bp);
5659 bnx2_napi_disable(bp);
5666 mod_timer(&bp->timer, jiffies + bp->current_interval);
5668 atomic_set(&bp->intr_sem, 0);
5670 bnx2_enable_int(bp);
5672 if (bp->flags & BNX2_FLAG_USING_MSI) {
5673 /* Test MSI to make sure it is working.
5674 * If the MSI test fails, fall back to INTx mode. */
5676 if (bnx2_test_intr(bp) != 0) {
5677 printk(KERN_WARNING PFX "%s: No interrupt was generated"
5678 " using MSI, switching to INTx mode. Please"
5679 " report this failure to the PCI maintainer"
5680 " and include system chipset information.\n",
5683 bnx2_disable_int(bp);
5686 bnx2_setup_int_mode(bp, 1);
5688 rc = bnx2_init_nic(bp);
5691 rc = bnx2_request_irq(bp);
5694 bnx2_napi_disable(bp);
5697 del_timer_sync(&bp->timer);
5700 bnx2_enable_int(bp);
5703 if (bp->flags & BNX2_FLAG_USING_MSI)
5704 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
5705 else if (bp->flags & BNX2_FLAG_USING_MSIX)
5706 printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);
5708 netif_start_queue(dev);
5714 bnx2_reset_task(struct work_struct *work)
5716 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
5718 if (!netif_running(bp->dev))
5721 bp->in_reset_task = 1;
5722 bnx2_netif_stop(bp);
5726 atomic_set(&bp->intr_sem, 1);
5727 bnx2_netif_start(bp);
5728 bp->in_reset_task = 0;
5732 bnx2_tx_timeout(struct net_device *dev)
5734 struct bnx2 *bp = netdev_priv(dev);
5736 /* This allows the netif to be shut down gracefully before resetting */
5737 schedule_work(&bp->reset_task);
5741 /* Called with rtnl_lock */
5743 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
5745 struct bnx2 *bp = netdev_priv(dev);
5747 bnx2_netif_stop(bp);
5750 bnx2_set_rx_mode(dev);
5752 bnx2_netif_start(bp);
5756 /* Called with netif_tx_lock.
5757 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
5758 * netif_wake_queue().
5761 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
5763 struct bnx2 *bp = netdev_priv(dev);
5766 struct sw_bd *tx_buf;
5767 u32 len, vlan_tag_flags, last_frag, mss;
5768 u16 prod, ring_prod;
5770 struct bnx2_napi *bnapi = &bp->bnx2_napi[bp->tx_vec];
5772 if (unlikely(bnx2_tx_avail(bp, bnapi) <
5773 (skb_shinfo(skb)->nr_frags + 1))) {
5774 netif_stop_queue(dev);
5775 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
5778 return NETDEV_TX_BUSY;
5780 len = skb_headlen(skb);
5782 ring_prod = TX_RING_IDX(prod);
5785 if (skb->ip_summed == CHECKSUM_PARTIAL) {
5786 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
5789 if (bp->vlgrp && vlan_tx_tag_present(skb)) {
5791 vlan_tag_flags |= (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
5793 if ((mss = skb_shinfo(skb)->gso_size)) {
5794 u32 tcp_opt_len, ip_tcp_len;
5797 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
5799 tcp_opt_len = tcp_optlen(skb);
5801 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
5802 u32 tcp_off = skb_transport_offset(skb) -
5803 sizeof(struct ipv6hdr) - ETH_HLEN;
5805 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
5806 TX_BD_FLAGS_SW_FLAGS;
5807 if (likely(tcp_off == 0))
5808 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
5811 vlan_tag_flags |= ((tcp_off & 0x3) <<
5812 TX_BD_FLAGS_TCP6_OFF0_SHL) |
5813 ((tcp_off & 0x10) <<
5814 TX_BD_FLAGS_TCP6_OFF4_SHL);
5815 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
5818 if (skb_header_cloned(skb) &&
5819 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5821 return NETDEV_TX_OK;
5824 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5828 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5829 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5833 if (tcp_opt_len || (iph->ihl > 5)) {
5834 vlan_tag_flags |= ((iph->ihl - 5) +
5835 (tcp_opt_len >> 2)) << 8;
5841 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
5843 tx_buf = &bp->tx_buf_ring[ring_prod];
5845 pci_unmap_addr_set(tx_buf, mapping, mapping);
5847 txbd = &bp->tx_desc_ring[ring_prod];
5849 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5850 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5851 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5852 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
5854 last_frag = skb_shinfo(skb)->nr_frags;
5856 for (i = 0; i < last_frag; i++) {
5857 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5859 prod = NEXT_TX_BD(prod);
5860 ring_prod = TX_RING_IDX(prod);
5861 txbd = &bp->tx_desc_ring[ring_prod];
5864 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
5865 len, PCI_DMA_TODEVICE);
5866 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod], mapping, mapping);
5869 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5870 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5871 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5872 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
5875 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
5877 prod = NEXT_TX_BD(prod);
5878 bp->tx_prod_bseq += skb->len;
5880 REG_WR16(bp, bp->tx_bidx_addr, prod);
5881 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
5886 dev->trans_start = jiffies;
5888 if (unlikely(bnx2_tx_avail(bp, bnapi) <= MAX_SKB_FRAGS)) {
5889 netif_stop_queue(dev);
5890 if (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh)
5891 netif_wake_queue(dev);
5894 return NETDEV_TX_OK;
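/* Queue flow control above uses hysteresis: the queue is stopped only
 * once fewer than MAX_SKB_FRAGS + 1 descriptors remain (the worst-case
 * cost of a single skb), and is woken again only when availability
 * rises above the larger tx_wake_thresh -- checked both here and in
 * bnx2_tx_int() -- so the queue does not thrash on and off right at
 * the full boundary.
 */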
5897 /* Called with rtnl_lock */
5899 bnx2_close(struct net_device *dev)
5901 struct bnx2 *bp = netdev_priv(dev);
5904 /* Calling flush_scheduled_work() may deadlock because
5905 * linkwatch_event() may be on the workqueue and it will try to get
5906 * the rtnl_lock, which we are holding. */
5908 while (bp->in_reset_task)
5911 bnx2_disable_int_sync(bp);
5912 bnx2_napi_disable(bp);
5913 del_timer_sync(&bp->timer);
5914 if (bp->flags & BNX2_FLAG_NO_WOL)
5915 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5917 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5919 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5920 bnx2_reset_chip(bp, reset_code);
5925 netif_carrier_off(bp->dev);
5926 bnx2_set_power_state(bp, PCI_D3hot);
5930 #define GET_NET_STATS64(ctr) \
5931 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
5932 (unsigned long) (ctr##_lo)
5934 #define GET_NET_STATS32(ctr) (ctr##_lo)
5937 #if (BITS_PER_LONG == 64)
5938 #define GET_NET_STATS GET_NET_STATS64
#else
5940 #define GET_NET_STATS GET_NET_STATS32
#endif
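/* Example of the macros above: a 64-bit hardware counter is stored as
 * two 32-bit words, so GET_NET_STATS64(stats_blk->stat_IfHCInOctets)
 * expands to roughly
 *	((unsigned long) stat_IfHCInOctets_hi << 32) +
 *	 (unsigned long) stat_IfHCInOctets_lo
 * On 32-bit kernels GET_NET_STATS32 reports only the low word, trading
 * accuracy after 2^32 events for avoiding 64-bit arithmetic here.
 */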
5943 static struct net_device_stats *
5944 bnx2_get_stats(struct net_device *dev)
5946 struct bnx2 *bp = netdev_priv(dev);
5947 struct statistics_block *stats_blk = bp->stats_blk;
5948 struct net_device_stats *net_stats = &bp->net_stats;
5950 if (bp->stats_blk == NULL) return net_stats;
5953 net_stats->rx_packets =
5954 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
5955 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
5956 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
5958 net_stats->tx_packets =
5959 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
5960 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
5961 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
5963 net_stats->rx_bytes =
5964 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
5966 net_stats->tx_bytes =
5967 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
5969 net_stats->multicast =
5970 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
5972 net_stats->collisions =
5973 (unsigned long) stats_blk->stat_EtherStatsCollisions;
5975 net_stats->rx_length_errors =
5976 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
5977 stats_blk->stat_EtherStatsOverrsizePkts);
5979 net_stats->rx_over_errors =
5980 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
5982 net_stats->rx_frame_errors =
5983 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
5985 net_stats->rx_crc_errors =
5986 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
5988 net_stats->rx_errors = net_stats->rx_length_errors +
5989 net_stats->rx_over_errors + net_stats->rx_frame_errors +
5990 net_stats->rx_crc_errors;
5992 net_stats->tx_aborted_errors =
5993 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
5994 stats_blk->stat_Dot3StatsLateCollisions);
5996 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
5997 (CHIP_ID(bp) == CHIP_ID_5708_A0))
5998 net_stats->tx_carrier_errors = 0;
6000 net_stats->tx_carrier_errors =
6002 (unsigned long) stats_blk->stat_Dot3StatsCarrierSenseErrors;
6005 net_stats->tx_errors =
6007 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
6009 net_stats->tx_aborted_errors +
6010 net_stats->tx_carrier_errors;
6012 net_stats->rx_missed_errors =
6013 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
6014 stats_blk->stat_FwRxDrop);
6019 /* All ethtool functions called with rtnl_lock */
6022 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6024 struct bnx2 *bp = netdev_priv(dev);
6025 int support_serdes = 0, support_copper = 0;
6027 cmd->supported = SUPPORTED_Autoneg;
6028 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6031 } else if (bp->phy_port == PORT_FIBRE)
6036 if (support_serdes) {
6037 cmd->supported |= SUPPORTED_1000baseT_Full |
6039 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6040 cmd->supported |= SUPPORTED_2500baseX_Full;
6043 if (support_copper) {
6044 cmd->supported |= SUPPORTED_10baseT_Half |
6045 SUPPORTED_10baseT_Full |
6046 SUPPORTED_100baseT_Half |
6047 SUPPORTED_100baseT_Full |
6048 SUPPORTED_1000baseT_Full |
6053 spin_lock_bh(&bp->phy_lock);
6054 cmd->port = bp->phy_port;
6055 cmd->advertising = bp->advertising;
6057 if (bp->autoneg & AUTONEG_SPEED) {
6058 cmd->autoneg = AUTONEG_ENABLE;
6061 cmd->autoneg = AUTONEG_DISABLE;
6064 if (netif_carrier_ok(dev)) {
6065 cmd->speed = bp->line_speed;
6066 cmd->duplex = bp->duplex;
6072 spin_unlock_bh(&bp->phy_lock);
6074 cmd->transceiver = XCVR_INTERNAL;
6075 cmd->phy_address = bp->phy_addr;
6081 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6083 struct bnx2 *bp = netdev_priv(dev);
6084 u8 autoneg = bp->autoneg;
6085 u8 req_duplex = bp->req_duplex;
6086 u16 req_line_speed = bp->req_line_speed;
6087 u32 advertising = bp->advertising;
6090 spin_lock_bh(&bp->phy_lock);
6092 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
6093 goto err_out_unlock;
6095 if (cmd->port != bp->phy_port &&
6096 !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
6097 goto err_out_unlock;
6099 if (cmd->autoneg == AUTONEG_ENABLE) {
6100 autoneg |= AUTONEG_SPEED;
6102 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
6104 /* allow advertising 1 speed */
6105 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
6106 (cmd->advertising == ADVERTISED_10baseT_Full) ||
6107 (cmd->advertising == ADVERTISED_100baseT_Half) ||
6108 (cmd->advertising == ADVERTISED_100baseT_Full)) {
6110 if (cmd->port == PORT_FIBRE)
6111 goto err_out_unlock;
6113 advertising = cmd->advertising;
6115 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
6116 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ||
6117 (cmd->port == PORT_TP))
6118 goto err_out_unlock;
6119 } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
6120 advertising = cmd->advertising;
6121 else if (cmd->advertising == ADVERTISED_1000baseT_Half)
6122 goto err_out_unlock;
6124 if (cmd->port == PORT_FIBRE)
6125 advertising = ETHTOOL_ALL_FIBRE_SPEED;
6127 advertising = ETHTOOL_ALL_COPPER_SPEED;
6129 advertising |= ADVERTISED_Autoneg;
6132 if (cmd->port == PORT_FIBRE) {
6133 if ((cmd->speed != SPEED_1000 &&
6134 cmd->speed != SPEED_2500) ||
6135 (cmd->duplex != DUPLEX_FULL))
6136 goto err_out_unlock;
6138 if (cmd->speed == SPEED_2500 &&
6139 !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
6140 goto err_out_unlock;
6142 else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
6143 goto err_out_unlock;
6145 autoneg &= ~AUTONEG_SPEED;
6146 req_line_speed = cmd->speed;
6147 req_duplex = cmd->duplex;
6151 bp->autoneg = autoneg;
6152 bp->advertising = advertising;
6153 bp->req_line_speed = req_line_speed;
6154 bp->req_duplex = req_duplex;
6156 err = bnx2_setup_phy(bp, cmd->port);
6159 spin_unlock_bh(&bp->phy_lock);
6165 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6167 struct bnx2 *bp = netdev_priv(dev);
6169 strcpy(info->driver, DRV_MODULE_NAME);
6170 strcpy(info->version, DRV_MODULE_VERSION);
6171 strcpy(info->bus_info, pci_name(bp->pdev));
6172 strcpy(info->fw_version, bp->fw_version);
6175 #define BNX2_REGDUMP_LEN (32 * 1024)
6178 bnx2_get_regs_len(struct net_device *dev)
6180 return BNX2_REGDUMP_LEN;
6184 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6186 u32 *p = _p, i, offset;
6188 struct bnx2 *bp = netdev_priv(dev);
6189 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6190 0x0800, 0x0880, 0x0c00, 0x0c10,
6191 0x0c30, 0x0d08, 0x1000, 0x101c,
6192 0x1040, 0x1048, 0x1080, 0x10a4,
6193 0x1400, 0x1490, 0x1498, 0x14f0,
6194 0x1500, 0x155c, 0x1580, 0x15dc,
6195 0x1600, 0x1658, 0x1680, 0x16d8,
6196 0x1800, 0x1820, 0x1840, 0x1854,
6197 0x1880, 0x1894, 0x1900, 0x1984,
6198 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6199 0x1c80, 0x1c94, 0x1d00, 0x1d84,
6200 0x2000, 0x2030, 0x23c0, 0x2400,
6201 0x2800, 0x2820, 0x2830, 0x2850,
6202 0x2b40, 0x2c10, 0x2fc0, 0x3058,
6203 0x3c00, 0x3c94, 0x4000, 0x4010,
6204 0x4080, 0x4090, 0x43c0, 0x4458,
6205 0x4c00, 0x4c18, 0x4c40, 0x4c54,
6206 0x4fc0, 0x5010, 0x53c0, 0x5444,
6207 0x5c00, 0x5c18, 0x5c80, 0x5c90,
6208 0x5fc0, 0x6000, 0x6400, 0x6428,
6209 0x6800, 0x6848, 0x684c, 0x6860,
6210 0x6888, 0x6910, 0x8000 };
6214 memset(p, 0, BNX2_REGDUMP_LEN);
6216 if (!netif_running(bp->dev)) return;
6220 offset = reg_boundaries[0];
6222 while (offset < BNX2_REGDUMP_LEN) {
6223 *p++ = REG_RD(bp, offset);
6225 if (offset == reg_boundaries[i + 1]) {
6226 offset = reg_boundaries[i + 2];
6227 p = (u32 *) (orig_p + offset);
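/* reg_boundaries[] above lists alternating start/end offsets of the
 * readable register ranges.  The loop dumps 32-bit words until it
 * reaches the end of one range, then jumps both the register offset
 * and the output pointer to the start of the next range, so offsets
 * inside the dump line up with register offsets and the unreadable
 * holes stay zero-filled from the memset() above.
 */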
6234 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6236 struct bnx2 *bp = netdev_priv(dev);
6238 if (bp->flags & BNX2_FLAG_NO_WOL) {
6243 wol->supported = WAKE_MAGIC;
6245 wol->wolopts = WAKE_MAGIC;
6249 memset(&wol->sopass, 0, sizeof(wol->sopass));
6253 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6255 struct bnx2 *bp = netdev_priv(dev);
6257 if (wol->wolopts & ~WAKE_MAGIC)
6260 if (wol->wolopts & WAKE_MAGIC) {
6261 if (bp->flags & BNX2_FLAG_NO_WOL)
6273 bnx2_nway_reset(struct net_device *dev)
6275 struct bnx2 *bp = netdev_priv(dev);
6278 if (!(bp->autoneg & AUTONEG_SPEED)) return -EINVAL;
6282 spin_lock_bh(&bp->phy_lock);
6284 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6287 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
6288 spin_unlock_bh(&bp->phy_lock);
6292 /* Force a link down visible on the other side */
6293 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6294 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
6295 spin_unlock_bh(&bp->phy_lock);
6299 spin_lock_bh(&bp->phy_lock);
6301 bp->current_interval = SERDES_AN_TIMEOUT;
6302 bp->serdes_an_pending = 1;
6303 mod_timer(&bp->timer, jiffies + bp->current_interval);
6306 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6307 bmcr &= ~BMCR_LOOPBACK;
6308 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
6310 spin_unlock_bh(&bp->phy_lock);
6316 bnx2_get_eeprom_len(struct net_device *dev)
6318 struct bnx2 *bp = netdev_priv(dev);
6320 if (bp->flash_info == NULL) return 0;
6323 return (int) bp->flash_size;
6327 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6330 struct bnx2 *bp = netdev_priv(dev);
6333 /* parameters already validated in ethtool_get_eeprom */
6335 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6341 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6344 struct bnx2 *bp = netdev_priv(dev);
6347 /* parameters already validated in ethtool_set_eeprom */
6349 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6355 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6357 struct bnx2 *bp = netdev_priv(dev);
6359 memset(coal, 0, sizeof(struct ethtool_coalesce));
6361 coal->rx_coalesce_usecs = bp->rx_ticks;
6362 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
6363 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
6364 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
6366 coal->tx_coalesce_usecs = bp->tx_ticks;
6367 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
6368 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
6369 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
6371 coal->stats_block_coalesce_usecs = bp->stats_ticks;
6377 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6379 struct bnx2 *bp = netdev_priv(dev);
6381 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
6382 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
6384 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
6385 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
6387 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
6388 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
6390 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
6391 if (bp->rx_quick_cons_trip_int > 0xff)
6392 bp->rx_quick_cons_trip_int = 0xff;
6394 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
6395 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
6397 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
6398 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
6400 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
6401 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
6403 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
6404 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int = 0xff;
6407 bp->stats_ticks = coal->stats_block_coalesce_usecs;
6408 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
6409 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
6410 bp->stats_ticks = USEC_PER_SEC;
6412 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
6413 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6414 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6416 if (netif_running(bp->dev)) {
6417 bnx2_netif_stop(bp);
6419 bnx2_netif_start(bp);
6426 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6428 struct bnx2 *bp = netdev_priv(dev);
6430 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
6431 ering->rx_mini_max_pending = 0;
6432 ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
6434 ering->rx_pending = bp->rx_ring_size;
6435 ering->rx_mini_pending = 0;
6436 ering->rx_jumbo_pending = bp->rx_pg_ring_size;
6438 ering->tx_max_pending = MAX_TX_DESC_CNT;
6439 ering->tx_pending = bp->tx_ring_size;
6443 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
6445 if (netif_running(bp->dev)) {
6446 bnx2_netif_stop(bp);
6447 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6452 bnx2_set_rx_ring_size(bp, rx);
6453 bp->tx_ring_size = tx;
6455 if (netif_running(bp->dev)) {
6458 rc = bnx2_alloc_mem(bp);
6462 bnx2_netif_start(bp);
6468 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6470 struct bnx2 *bp = netdev_priv(dev);
6473 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
6474 (ering->tx_pending > MAX_TX_DESC_CNT) ||
6475 (ering->tx_pending <= MAX_SKB_FRAGS)) {
6479 rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
6484 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6486 struct bnx2 *bp = netdev_priv(dev);
6488 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
6489 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
6490 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
6494 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6496 struct bnx2 *bp = netdev_priv(dev);
6498 bp->req_flow_ctrl = 0;
6499 if (epause->rx_pause)
6500 bp->req_flow_ctrl |= FLOW_CTRL_RX;
6501 if (epause->tx_pause)
6502 bp->req_flow_ctrl |= FLOW_CTRL_TX;
6504 if (epause->autoneg) {
6505 bp->autoneg |= AUTONEG_FLOW_CTRL;
6508 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
6511 spin_lock_bh(&bp->phy_lock);
6513 bnx2_setup_phy(bp, bp->phy_port);
6515 spin_unlock_bh(&bp->phy_lock);
6521 bnx2_get_rx_csum(struct net_device *dev)
6523 struct bnx2 *bp = netdev_priv(dev);
6529 bnx2_set_rx_csum(struct net_device *dev, u32 data)
6531 struct bnx2 *bp = netdev_priv(dev);
6538 bnx2_set_tso(struct net_device *dev, u32 data)
6540 struct bnx2 *bp = netdev_priv(dev);
6543 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6544 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6545 dev->features |= NETIF_F_TSO6;
6547 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
6552 #define BNX2_NUM_STATS 46
6555 char string[ETH_GSTRING_LEN];
6556 } bnx2_stats_str_arr[BNX2_NUM_STATS] = {
6558 { "rx_error_bytes" },
6560 { "tx_error_bytes" },
6561 { "rx_ucast_packets" },
6562 { "rx_mcast_packets" },
6563 { "rx_bcast_packets" },
6564 { "tx_ucast_packets" },
6565 { "tx_mcast_packets" },
6566 { "tx_bcast_packets" },
6567 { "tx_mac_errors" },
6568 { "tx_carrier_errors" },
6569 { "rx_crc_errors" },
6570 { "rx_align_errors" },
6571 { "tx_single_collisions" },
6572 { "tx_multi_collisions" },
6574 { "tx_excess_collisions" },
6575 { "tx_late_collisions" },
6576 { "tx_total_collisions" },
6579 { "rx_undersize_packets" },
6580 { "rx_oversize_packets" },
6581 { "rx_64_byte_packets" },
6582 { "rx_65_to_127_byte_packets" },
6583 { "rx_128_to_255_byte_packets" },
6584 { "rx_256_to_511_byte_packets" },
6585 { "rx_512_to_1023_byte_packets" },
6586 { "rx_1024_to_1522_byte_packets" },
6587 { "rx_1523_to_9022_byte_packets" },
6588 { "tx_64_byte_packets" },
6589 { "tx_65_to_127_byte_packets" },
6590 { "tx_128_to_255_byte_packets" },
6591 { "tx_256_to_511_byte_packets" },
6592 { "tx_512_to_1023_byte_packets" },
6593 { "tx_1024_to_1522_byte_packets" },
6594 { "tx_1523_to_9022_byte_packets" },
6595 { "rx_xon_frames" },
6596 { "rx_xoff_frames" },
6597 { "tx_xon_frames" },
6598 { "tx_xoff_frames" },
6599 { "rx_mac_ctrl_frames" },
6600 { "rx_filtered_packets" },
6602 { "rx_fw_discards" },
6605 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
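/* Example: the stats block is treated as an array of 32-bit words, so
 * STATS_OFFSET32(stat_IfHCInOctets_hi) is offsetof(...) / 4, the word
 * index of that field.  bnx2_get_ethtool_stats() below reads the word
 * at that index for the high half and the word at index + 1 for the
 * low half of each 64-bit counter.
 */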
6607 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
6608 STATS_OFFSET32(stat_IfHCInOctets_hi),
6609 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
6610 STATS_OFFSET32(stat_IfHCOutOctets_hi),
6611 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
6612 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
6613 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
6614 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
6615 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
6616 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
6617 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
6618 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
6619 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
6620 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
6621 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
6622 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
6623 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
6624 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
6625 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
6626 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
6627 STATS_OFFSET32(stat_EtherStatsCollisions),
6628 STATS_OFFSET32(stat_EtherStatsFragments),
6629 STATS_OFFSET32(stat_EtherStatsJabbers),
6630 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
6631 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
6632 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
6633 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
6634 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
6635 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
6636 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
6637 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
6638 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
6639 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
6640 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
6641 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
6642 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
6643 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
6644 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
6645 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
6646 STATS_OFFSET32(stat_XonPauseFramesReceived),
6647 STATS_OFFSET32(stat_XoffPauseFramesReceived),
6648 STATS_OFFSET32(stat_OutXonSent),
6649 STATS_OFFSET32(stat_OutXoffSent),
6650 STATS_OFFSET32(stat_MacControlFramesReceived),
6651 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
6652 STATS_OFFSET32(stat_IfInMBUFDiscards),
6653 STATS_OFFSET32(stat_FwRxDrop),
6656 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
6657 * skipped because of errata.
6659 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
6660 8,0,8,8,8,8,8,8,8,8,
6661 4,0,4,4,4,4,4,4,4,4,
6662 4,4,4,4,4,4,4,4,4,4,
6663 4,4,4,4,4,4,4,4,4,4,
6667 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
6668 8,0,8,8,8,8,8,8,8,8,
6669 4,4,4,4,4,4,4,4,4,4,
6670 4,4,4,4,4,4,4,4,4,4,
6671 4,4,4,4,4,4,4,4,4,4,
6675 #define BNX2_NUM_TESTS 6
6678 char string[ETH_GSTRING_LEN];
6679 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
6680 { "register_test (offline)" },
6681 { "memory_test (offline)" },
6682 { "loopback_test (offline)" },
6683 { "nvram_test (online)" },
6684 { "interrupt_test (online)" },
6685 { "link_test (online)" },
6689 bnx2_get_sset_count(struct net_device *dev, int sset)
6693 case ETH_SS_TEST: return BNX2_NUM_TESTS;
6695 case ETH_SS_STATS: return BNX2_NUM_STATS;
6702 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
6704 struct bnx2 *bp = netdev_priv(dev);
6706 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
6707 if (etest->flags & ETH_TEST_FL_OFFLINE) {
6710 bnx2_netif_stop(bp);
6711 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
6714 if (bnx2_test_registers(bp) != 0) {
6716 etest->flags |= ETH_TEST_FL_FAILED;
6718 if (bnx2_test_memory(bp) != 0) {
6720 etest->flags |= ETH_TEST_FL_FAILED;
6722 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
6723 etest->flags |= ETH_TEST_FL_FAILED;
6725 if (!netif_running(bp->dev)) {
6726 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6730 bnx2_netif_start(bp);
6733 /* wait for link up */
6734 for (i = 0; i < 7; i++) {
6737 msleep_interruptible(1000);
6741 if (bnx2_test_nvram(bp) != 0) {
6743 etest->flags |= ETH_TEST_FL_FAILED;
6745 if (bnx2_test_intr(bp) != 0) {
6747 etest->flags |= ETH_TEST_FL_FAILED;
6750 if (bnx2_test_link(bp) != 0) {
6752 etest->flags |= ETH_TEST_FL_FAILED;
6758 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6760 switch (stringset) {
6762 memcpy(buf, bnx2_stats_str_arr,
6763 sizeof(bnx2_stats_str_arr));
6766 memcpy(buf, bnx2_tests_str_arr,
6767 sizeof(bnx2_tests_str_arr));
6773 bnx2_get_ethtool_stats(struct net_device *dev,
6774 struct ethtool_stats *stats, u64 *buf)
6776 struct bnx2 *bp = netdev_priv(dev);
6778 u32 *hw_stats = (u32 *) bp->stats_blk;
6779 u8 *stats_len_arr = NULL;
6781 if (hw_stats == NULL) {
6782 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
return;
}
6786 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
6787 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
6788 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
6789 (CHIP_ID(bp) == CHIP_ID_5708_A0))
6790 stats_len_arr = bnx2_5706_stats_len_arr;
6792 stats_len_arr = bnx2_5708_stats_len_arr;
6794 for (i = 0; i < BNX2_NUM_STATS; i++) {
6795 if (stats_len_arr[i] == 0) {
6796 /* skip this counter */
6800 if (stats_len_arr[i] == 4) {
6801 /* 4-byte counter */
6803 *(hw_stats + bnx2_stats_offset_arr[i]);
6806 /* 8-byte counter */
6807 buf[i] = (((u64) *(hw_stats +
6808 bnx2_stats_offset_arr[i])) << 32) +
6809 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
6814 bnx2_phys_id(struct net_device *dev, u32 data)
6816 struct bnx2 *bp = netdev_priv(dev);
6823 save = REG_RD(bp, BNX2_MISC_CFG);
6824 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
6826 for (i = 0; i < (data * 2); i++) {
6828 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
6831 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
6832 BNX2_EMAC_LED_1000MB_OVERRIDE |
6833 BNX2_EMAC_LED_100MB_OVERRIDE |
6834 BNX2_EMAC_LED_10MB_OVERRIDE |
6835 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
6836 BNX2_EMAC_LED_TRAFFIC);
6838 msleep_interruptible(500);
6839 if (signal_pending(current))
6842 REG_WR(bp, BNX2_EMAC_LED, 0);
6843 REG_WR(bp, BNX2_MISC_CFG, save);
6848 bnx2_set_tx_csum(struct net_device *dev, u32 data)
6850 struct bnx2 *bp = netdev_priv(dev);
6852 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6853 return (ethtool_op_set_tx_ipv6_csum(dev, data));
6855 return (ethtool_op_set_tx_csum(dev, data));
6858 static const struct ethtool_ops bnx2_ethtool_ops = {
6859 .get_settings = bnx2_get_settings,
6860 .set_settings = bnx2_set_settings,
6861 .get_drvinfo = bnx2_get_drvinfo,
6862 .get_regs_len = bnx2_get_regs_len,
6863 .get_regs = bnx2_get_regs,
6864 .get_wol = bnx2_get_wol,
6865 .set_wol = bnx2_set_wol,
6866 .nway_reset = bnx2_nway_reset,
6867 .get_link = ethtool_op_get_link,
6868 .get_eeprom_len = bnx2_get_eeprom_len,
6869 .get_eeprom = bnx2_get_eeprom,
6870 .set_eeprom = bnx2_set_eeprom,
6871 .get_coalesce = bnx2_get_coalesce,
6872 .set_coalesce = bnx2_set_coalesce,
6873 .get_ringparam = bnx2_get_ringparam,
6874 .set_ringparam = bnx2_set_ringparam,
6875 .get_pauseparam = bnx2_get_pauseparam,
6876 .set_pauseparam = bnx2_set_pauseparam,
6877 .get_rx_csum = bnx2_get_rx_csum,
6878 .set_rx_csum = bnx2_set_rx_csum,
6879 .set_tx_csum = bnx2_set_tx_csum,
6880 .set_sg = ethtool_op_set_sg,
6881 .set_tso = bnx2_set_tso,
6882 .self_test = bnx2_self_test,
6883 .get_strings = bnx2_get_strings,
6884 .phys_id = bnx2_phys_id,
6885 .get_ethtool_stats = bnx2_get_ethtool_stats,
6886 .get_sset_count = bnx2_get_sset_count,
6889 /* Called with rtnl_lock */
6891 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6893 struct mii_ioctl_data *data = if_mii(ifr);
6894 struct bnx2 *bp = netdev_priv(dev);
6899 data->phy_id = bp->phy_addr;
6905 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
return -EOPNOTSUPP;
6908 if (!netif_running(dev))
return -EAGAIN;
6911 spin_lock_bh(&bp->phy_lock);
6912 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
6913 spin_unlock_bh(&bp->phy_lock);
6915 data->val_out = mii_regval;
6921 if (!capable(CAP_NET_ADMIN))
return -EPERM;
6924 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
return -EOPNOTSUPP;
6927 if (!netif_running(dev))
return -EAGAIN;
6930 spin_lock_bh(&bp->phy_lock);
6931 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
6932 spin_unlock_bh(&bp->phy_lock);
6943 /* Called with rtnl_lock */
6945 bnx2_change_mac_addr(struct net_device *dev, void *p)
6947 struct sockaddr *addr = p;
6948 struct bnx2 *bp = netdev_priv(dev);
6950 if (!is_valid_ether_addr(addr->sa_data)) return -EINVAL;
6953 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6954 if (netif_running(dev))
6955 bnx2_set_mac_addr(bp);
6960 /* Called with rtnl_lock */
6962 bnx2_change_mtu(struct net_device *dev, int new_mtu)
6964 struct bnx2 *bp = netdev_priv(dev);
6966 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
6967 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
return -EINVAL;
dev->mtu = new_mtu;
6971 return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
6974 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6976 poll_bnx2(struct net_device *dev)
6978 struct bnx2 *bp = netdev_priv(dev);
6980 disable_irq(bp->pdev->irq);
6981 bnx2_interrupt(bp->pdev->irq, dev);
6982 enable_irq(bp->pdev->irq);
6986 static void __devinit
6987 bnx2_get_5709_media(struct bnx2 *bp)
6989 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
6990 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
6993 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
6995 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
6996 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7000 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7001 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7003 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7005 if (PCI_FUNC(bp->pdev->devfn) == 0) {
7010 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7018 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7024 static void __devinit
7025 bnx2_get_pci_speed(struct bnx2 *bp)
7029 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
7030 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7033 bp->flags |= BNX2_FLAG_PCIX;
7035 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7037 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
7039 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
7040 bp->bus_speed_mhz = 133;
7043 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
7044 bp->bus_speed_mhz = 100;
7047 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
7048 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
7049 bp->bus_speed_mhz = 66;
7052 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
7053 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
7054 bp->bus_speed_mhz = 50;
7057 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
7058 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
7059 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
7060 bp->bus_speed_mhz = 33;
7065 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
7066 bp->bus_speed_mhz = 66;
7068 bp->bus_speed_mhz = 33;
7071 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
7072 bp->flags |= BNX2_FLAG_PCI_32BIT;
7076 static int __devinit
7077 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7080 unsigned long mem_len;
7083 u64 dma_mask, persist_dma_mask;
7085 SET_NETDEV_DEV(dev, &pdev->dev);
7086 bp = netdev_priv(dev);
7091 /* enable device (incl. PCI PM wakeup), and bus-mastering */
7092 rc = pci_enable_device(pdev);
7094 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
7098 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7100 "Cannot find PCI device base address, aborting.\n");
7102 goto err_out_disable;
7105 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7107 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
7108 goto err_out_disable;
7111 pci_set_master(pdev);
7113 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7114 if (bp->pm_cap == 0) {
7116 "Cannot find power management capability, aborting.\n");
7118 goto err_out_release;
7124 spin_lock_init(&bp->phy_lock);
7125 spin_lock_init(&bp->indirect_lock);
7126 INIT_WORK(&bp->reset_task, bnx2_reset_task);
7128 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7129 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
7130 dev->mem_end = dev->mem_start + mem_len;
7131 dev->irq = pdev->irq;
7133 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7136 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
7138 goto err_out_release;
7141 /* Configure byte swap and enable write to the reg_window registers.
7142 * Rely on CPU to do target byte swapping on big endian systems.
7143 * The chip's target access swapping will not swap all accesses. */
7145 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7146 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7147 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7149 bnx2_set_power_state(bp, PCI_D0);
7151 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7153 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7154 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7156 "Cannot find PCIE capability, aborting.\n");
7160 bp->flags |= BNX2_FLAG_PCIE;
7161 if (CHIP_REV(bp) == CHIP_REV_Ax)
7162 bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7164 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7165 if (bp->pcix_cap == 0) {
7167 "Cannot find PCIX capability, aborting.\n");
7173 if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7174 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7175 bp->flags |= BNX2_FLAG_MSIX_CAP;
7178 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7179 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
7180 bp->flags |= BNX2_FLAG_MSI_CAP;
7183 /* 5708 cannot support DMA addresses > 40-bit. */
7184 if (CHIP_NUM(bp) == CHIP_NUM_5708)
7185 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
7187 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
7189 /* Configure DMA attributes. */
7190 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7191 dev->features |= NETIF_F_HIGHDMA;
7192 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7195 "pci_set_consistent_dma_mask failed, aborting.\n");
7198 } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
7199 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
7203 if (!(bp->flags & BNX2_FLAG_PCIE))
7204 bnx2_get_pci_speed(bp);
7206 /* 5706A0 may falsely detect SERR and PERR. */
7207 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7208 reg = REG_RD(bp, PCI_COMMAND);
7209 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7210 REG_WR(bp, PCI_COMMAND, reg);
7212 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
7213 !(bp->flags & BNX2_FLAG_PCIX)) {
7216 "5706 A1 can only be used in a PCIX bus, aborting.\n");
7220 bnx2_init_nvram(bp);
7222 reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
7224 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
7225 BNX2_SHM_HDR_SIGNATURE_SIG) {
7226 u32 off = PCI_FUNC(pdev->devfn) << 2;
7228 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
7230 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
7232 /* Get the permanent MAC address. First we need to make sure the
7233 * firmware is actually running. */
7235 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
7237 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
7238 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
7239 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
7244 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
7245 for (i = 0, j = 0; i < 3; i++) {
7248 num = (u8) (reg >> (24 - (i * 8)));
7249 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
7250 if (num >= k || !skip0 || k == 1) {
7251 bp->fw_version[j++] = (num / k) + '0';
7256 bp->fw_version[j++] = '.';
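/* The loop above renders the packed bootcode revision as dotted
 * decimal, one byte per field, most significant byte first, with skip0
 * suppressing leading zeros within a field.  For example (hypothetical
 * register value, not from real hardware): reg = 0x010e0300 prints as
 * "1.14.3".
 */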
7258 reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
7259 if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
7262 if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
7263 bp->flags |= BNX2_FLAG_ASF_ENABLE;
7265 for (i = 0; i < 30; i++) {
7266 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7267 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
7272 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7273 reg &= BNX2_CONDITION_MFW_RUN_MASK;
7274 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
7275 reg != BNX2_CONDITION_MFW_RUN_NONE) {
7277 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
7279 bp->fw_version[j++] = ' ';
7280 for (i = 0; i < 3; i++) {
7281 reg = bnx2_reg_rd_ind(bp, addr + i * 4);
7283 memcpy(&bp->fw_version[j], &reg, 4);
7288 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
7289 bp->mac_addr[0] = (u8) (reg >> 8);
7290 bp->mac_addr[1] = (u8) reg;
7292 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
7293 bp->mac_addr[2] = (u8) (reg >> 24);
7294 bp->mac_addr[3] = (u8) (reg >> 16);
7295 bp->mac_addr[4] = (u8) (reg >> 8);
7296 bp->mac_addr[5] = (u8) reg;
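/* The permanent MAC address is packed big-endian across two shared
 * memory words: MAC_UPPER carries the first two bytes and MAC_LOWER
 * the remaining four.  For example (hypothetical values):
 * MAC_UPPER = 0x00001018 and MAC_LOWER = 0x8a2b3c4d unpack to
 * 10:18:8a:2b:3c:4d.
 */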
7298 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
7300 bp->tx_ring_size = MAX_TX_DESC_CNT;
7301 bnx2_set_rx_ring_size(bp, 255);
7305 bp->tx_quick_cons_trip_int = 20;
7306 bp->tx_quick_cons_trip = 20;
7307 bp->tx_ticks_int = 80;
7310 bp->rx_quick_cons_trip_int = 6;
7311 bp->rx_quick_cons_trip = 6;
7312 bp->rx_ticks_int = 18;
7315 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7317 bp->timer_interval = HZ;
7318 bp->current_interval = HZ;
7322 /* Disable WOL support if we are running on a SERDES chip. */
7323 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7324 bnx2_get_5709_media(bp);
7325 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
7326 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7328 bp->phy_port = PORT_TP;
7329 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7330 bp->phy_port = PORT_FIBRE;
7331 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
7332 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
7333 bp->flags |= BNX2_FLAG_NO_WOL;
7336 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
7337 /* Don't do parallel detect on this board because of
7338 * some board problems. The link will not go down
7339 * if we do parallel detect. */
7341 if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
7342 pdev->subsystem_device == 0x310c)
7343 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
7346 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
7347 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
7349 bnx2_init_remote_phy(bp);
7351 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
7352 CHIP_NUM(bp) == CHIP_NUM_5708)
7353 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
7354 else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
7355 (CHIP_REV(bp) == CHIP_REV_Ax ||
7356 CHIP_REV(bp) == CHIP_REV_Bx))
7357 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
7359 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
7360 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
7361 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
7362 bp->flags |= BNX2_FLAG_NO_WOL;
7366 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7367 bp->tx_quick_cons_trip_int =
7368 bp->tx_quick_cons_trip;
7369 bp->tx_ticks_int = bp->tx_ticks;
7370 bp->rx_quick_cons_trip_int =
7371 bp->rx_quick_cons_trip;
7372 bp->rx_ticks_int = bp->rx_ticks;
7373 bp->comp_prod_trip_int = bp->comp_prod_trip;
7374 bp->com_ticks_int = bp->com_ticks;
7375 bp->cmd_ticks_int = bp->cmd_ticks;
7378 /* Disable MSI on 5706 if AMD 8132 bridge is found.
7380 * MSI is defined to be a 32-bit write. The 5706 does 64-bit MSI writes
7381 * with byte enables disabled on the unused 32-bit word. This is legal
7382 * but causes problems on the AMD 8132 which will eventually stop
7383 * responding after a while.
7385 * AMD believes this incompatibility is unique to the 5706, and
7386 * prefers to locally disable MSI rather than globally disabling it.
7388 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
7389 struct pci_dev *amd_8132 = NULL;
7391 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
7392 PCI_DEVICE_ID_AMD_8132_BRIDGE,
7395 if (amd_8132->revision >= 0x10 &&
7396 amd_8132->revision <= 0x13) {
7397 disable_msi = 1;
7398 pci_dev_put(amd_8132);
7399 break;
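/*
 * pci_get_device() takes a reference on the device it returns and
 * drops the reference held on its 'from' argument, so a search that
 * runs to completion (returns NULL) leaves nothing to clean up.
 * Breaking out early, as above, still holds the last reference,
 * which is why pci_dev_put() precedes the break.
 */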
7404 bnx2_set_default_link(bp);
7405 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
7407 init_timer(&bp->timer);
7408 bp->timer.expires = RUN_AT(bp->timer_interval);
7409 bp->timer.data = (unsigned long) bp;
7410 bp->timer.function = bnx2_timer;
7416 iounmap(bp->regview);
7421 pci_release_regions(pdev);
7424 pci_disable_device(pdev);
7425 pci_set_drvdata(pdev, NULL);
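/*
 * These error labels unwind bnx2_init_board() in reverse order of
 * acquisition: unmap the register BAR, release the PCI regions,
 * disable the device, then clear the driver data pointer.
 */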
7431 static char * __devinit
7432 bnx2_bus_string(struct bnx2 *bp, char *str)
7434 char *s = str;
7436 if (bp->flags & BNX2_FLAG_PCIE) {
7437 s += sprintf(s, "PCI Express");
7438 } else {
7439 s += sprintf(s, "PCI");
7440 if (bp->flags & BNX2_FLAG_PCIX)
7441 s += sprintf(s, "-X");
7442 if (bp->flags & BNX2_FLAG_PCI_32BIT)
7443 s += sprintf(s, " 32-bit");
7444 else
7445 s += sprintf(s, " 64-bit");
7446 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
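/*
 * Sample results for a few (hypothetical) flag combinations: a PCIe
 * device yields "PCI Express", a 64-bit PCI-X slot at 133 MHz yields
 * "PCI-X 64-bit 133MHz", and a 32-bit conventional PCI slot at 66 MHz
 * yields "PCI 32-bit 66MHz"; the function hands back the caller's str,
 * with s having advanced past each sprintf().
 */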
7451 static void __devinit
7452 bnx2_init_napi(struct bnx2 *bp)
7455 struct bnx2_napi *bnapi;
7457 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
7458 bnapi = &bp->bnx2_napi[i];
7459 bnapi->bp = bp;
7461 netif_napi_add(bp->dev, &bp->bnx2_napi[0].napi, bnx2_poll, 64);
7462 netif_napi_add(bp->dev, &bp->bnx2_napi[BNX2_TX_VEC].napi, bnx2_tx_poll,
7463 64);
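/*
 * For reference, a minimal poll handler obeying the contract that
 * netif_napi_add() registers above; a sketch only (example_poll and
 * example_do_work are hypothetical names, not driver functions),
 * guarded out so it is never compiled.
 */
#if 0
static int example_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done;

	/* Reap at most 'budget' completions. */
	work_done = example_do_work(bp, bnapi, budget);

	/* Coming in under budget means the rings are drained: leave
	 * polling mode so the chip may interrupt again.
	 */
	if (work_done < budget)
		netif_rx_complete(bp->dev, napi);

	return work_done;
}
#endif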
7466 static int __devinit
7467 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7469 static int version_printed = 0;
7470 struct net_device *dev = NULL;
7474 DECLARE_MAC_BUF(mac);
7476 if (version_printed++ == 0)
7477 printk(KERN_INFO "%s", version);
7479 /* dev zeroed in alloc_etherdev */
7480 dev = alloc_etherdev(sizeof(*bp));
7485 rc = bnx2_init_board(pdev, dev);
7491 dev->open = bnx2_open;
7492 dev->hard_start_xmit = bnx2_start_xmit;
7493 dev->stop = bnx2_close;
7494 dev->get_stats = bnx2_get_stats;
7495 dev->set_multicast_list = bnx2_set_rx_mode;
7496 dev->do_ioctl = bnx2_ioctl;
7497 dev->set_mac_address = bnx2_change_mac_addr;
7498 dev->change_mtu = bnx2_change_mtu;
7499 dev->tx_timeout = bnx2_tx_timeout;
7500 dev->watchdog_timeo = TX_TIMEOUT;
7502 dev->vlan_rx_register = bnx2_vlan_rx_register;
7504 dev->ethtool_ops = &bnx2_ethtool_ops;
7506 bp = netdev_priv(dev);
7509 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
7510 dev->poll_controller = poll_bnx2;
7513 pci_set_drvdata(pdev, dev);
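/*
 * The net_device is stashed in the pci_dev here so that the remove,
 * suspend and resume handlers below can recover it from nothing but
 * the pci_dev via pci_get_drvdata().
 */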
7515 memcpy(dev->dev_addr, bp->mac_addr, 6);
7516 memcpy(dev->perm_addr, bp->mac_addr, 6);
7517 bp->name = board_info[ent->driver_data].name;
7519 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
7520 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7521 dev->features |= NETIF_F_IPV6_CSUM;
7524 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7526 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
7527 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7528 dev->features |= NETIF_F_TSO6;
7530 if ((rc = register_netdev(dev))) {
7531 dev_err(&pdev->dev, "Cannot register net device\n");
7533 iounmap(bp->regview);
7534 pci_release_regions(pdev);
7535 pci_disable_device(pdev);
7536 pci_set_drvdata(pdev, NULL);
7541 printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
7542 "IRQ %d, node addr %s\n",
7545 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
7546 ((CHIP_ID(bp) & 0x0ff0) >> 4),
7547 bnx2_bus_string(bp, str),
7549 bp->pdev->irq, print_mac(mac, dev->dev_addr));
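/*
 * Worked example for the "(%c%d)" stepping: a hypothetical CHIP_ID of
 * 0x57081010 prints "(B1)": bits 15:12 are 0x1, so 0x1 + 'A' = 'B',
 * and bits 11:4 are 0x01, the metal revision.
 */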
7554 static void __devexit
7555 bnx2_remove_one(struct pci_dev *pdev)
7557 struct net_device *dev = pci_get_drvdata(pdev);
7558 struct bnx2 *bp = netdev_priv(dev);
7560 flush_scheduled_work();
7562 unregister_netdev(dev);
7565 iounmap(bp->regview);
7568 pci_release_regions(pdev);
7569 pci_disable_device(pdev);
7570 pci_set_drvdata(pdev, NULL);
7573 static int
7574 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
7576 struct net_device *dev = pci_get_drvdata(pdev);
7577 struct bnx2 *bp = netdev_priv(dev);
7580 /* PCI register 4 needs to be saved whether netif_running() or not.
7581 * MSI address and data need to be saved if using MSI and
7582 * netif_running().
7583 */
7584 pci_save_state(pdev);
7585 if (!netif_running(dev))
7588 flush_scheduled_work();
7589 bnx2_netif_stop(bp);
7590 netif_device_detach(dev);
7591 del_timer_sync(&bp->timer);
7592 if (bp->flags & BNX2_FLAG_NO_WOL)
7593 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
7594 else if (bp->wol)
7595 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
7596 else
7597 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
7598 bnx2_reset_chip(bp, reset_code);
7600 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
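/*
 * The reset code reports the reason for the shutdown to the bootcode:
 * a link-down unload when WOL is not supported, a WOL-armed suspend
 * when the user requested it, and a plain suspend otherwise.
 */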
7604 static int
7605 bnx2_resume(struct pci_dev *pdev)
7607 struct net_device *dev = pci_get_drvdata(pdev);
7608 struct bnx2 *bp = netdev_priv(dev);
7610 pci_restore_state(pdev);
7611 if (!netif_running(dev))
7614 bnx2_set_power_state(bp, PCI_D0);
7615 netif_device_attach(dev);
7617 bnx2_netif_start(bp);
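/*
 * Ordering matters here: pci_restore_state() brings back the config
 * space saved in bnx2_suspend() (BARs, plus the MSI address/data
 * mentioned there) before the chip is returned to D0 and the NIC
 * reinitialized, so later MMIO lands on properly programmed BARs.
 */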
7621 static struct pci_driver bnx2_pci_driver = {
7622 .name = DRV_MODULE_NAME,
7623 .id_table = bnx2_pci_tbl,
7624 .probe = bnx2_init_one,
7625 .remove = __devexit_p(bnx2_remove_one),
7626 .suspend = bnx2_suspend,
7627 .resume = bnx2_resume,
7630 static int __init bnx2_init(void)
7632 return pci_register_driver(&bnx2_pci_driver);
7635 static void __exit bnx2_cleanup(void)
7637 pci_unregister_driver(&bnx2_pci_driver);
7640 module_init(bnx2_init);
7641 module_exit(bnx2_cleanup);