1 /* bnx2.c: Broadcom NX2 network driver.
3 * Copyright (c) 2004-2008 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Written by: Michael Chan (mchan@broadcom.com)
 */
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/bitops.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
#endif
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
55 #define FW_BUF_SIZE 0x10000
57 #define DRV_MODULE_NAME "bnx2"
58 #define PFX DRV_MODULE_NAME ": "
59 #define DRV_MODULE_VERSION "1.7.5"
60 #define DRV_MODULE_RELDATE "April 29, 2008"
62 #define RUN_AT(x) (jiffies + (x))
64 /* Time in jiffies before concluding the transmitter is hung. */
65 #define TX_TIMEOUT (5*HZ)
67 static char version[] __devinitdata =
68 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
70 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
71 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709 Driver");
72 MODULE_LICENSE("GPL");
73 MODULE_VERSION(DRV_MODULE_VERSION);
75 static int disable_msi = 0;
77 module_param(disable_msi, int, 0);
78 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
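/* Typical usage (assumed): "modprobe bnx2 disable_msi=1" makes the driver
 * fall back to legacy INTx interrupts instead of MSI.
 */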
92 /* indexed by board_t, above */
95 } board_info[] __devinitdata = {
96 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
97 { "HP NC370T Multifunction Gigabit Server Adapter" },
98 { "HP NC370i Multifunction Gigabit Server Adapter" },
99 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
100 { "HP NC370F Multifunction Gigabit Server Adapter" },
101 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
102 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
103 { "Broadcom NetXtreme II BCM5709 1000Base-T" },
104 { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
107 static struct pci_device_id bnx2_pci_tbl[] = {
108 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
109 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
110 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
111 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
112 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
113 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
114 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
115 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
116 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
117 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
118 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
119 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
120 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
121 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
122 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
123 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
124 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
125 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
129 static struct flash_spec flash_table[] =
131 #define BUFFERED_FLAGS (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
132 #define NONBUFFERED_FLAGS (BNX2_NV_WREN)
134 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
135 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
136 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
138 /* Expansion entry 0001 */
139 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
140 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
141 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
143 /* Saifun SA25F010 (non-buffered flash) */
144 /* strap, cfg1, & write1 need updates */
145 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
146 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
147 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
148 "Non-buffered flash (128kB)"},
149 /* Saifun SA25F020 (non-buffered flash) */
150 /* strap, cfg1, & write1 need updates */
151 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
152 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
153 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
154 "Non-buffered flash (256kB)"},
155 /* Expansion entry 0100 */
156 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
157 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
158 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
160 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
161 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
162 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
163 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
164 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
165 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
166 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
167 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
168 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
169 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
170 /* Saifun SA25F005 (non-buffered flash) */
171 /* strap, cfg1, & write1 need updates */
172 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
173 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
174 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
175 "Non-buffered flash (64kB)"},
177 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
178 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
179 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
181 /* Expansion entry 1001 */
182 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
183 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
184 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
186 /* Expansion entry 1010 */
187 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
188 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
189 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
191 /* ATMEL AT45DB011B (buffered flash) */
192 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
193 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
194 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
195 "Buffered flash (128kB)"},
196 /* Expansion entry 1100 */
197 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
198 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
199 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
201 /* Expansion entry 1101 */
202 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
203 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
204 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
206 /* Atmel Expansion entry 1110 */
207 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
208 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
209 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
210 "Entry 1110 (Atmel)"},
211 /* ATMEL AT45DB021B (buffered flash) */
212 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
213 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
214 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
215 "Buffered flash (256kB)"},
218 static struct flash_spec flash_5709 = {
219 .flags = BNX2_NV_BUFFERED,
220 .page_bits = BCM5709_FLASH_PAGE_BITS,
221 .page_size = BCM5709_FLASH_PAGE_SIZE,
222 .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK,
223 .total_size = BUFFERED_FLASH_TOTAL_SIZE*2,
224 .name = "5709 Buffered flash (256kB)",
227 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
229 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_napi *bnapi)
235 /* The ring uses 256 indices for 255 entries; one of them
236 * needs to be skipped.
	 */
238 diff = bp->tx_prod - bnapi->tx_cons;
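	/* tx_prod and tx_cons are free-running 16-bit ring indices, so their
	 * difference can wrap; the adjustment below also clamps a distance of
	 * exactly TX_DESC_CNT down to MAX_TX_DESC_CNT to account for the one
	 * skipped index per BD page.
	 */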
239 if (unlikely(diff >= TX_DESC_CNT)) {
241 if (diff == TX_DESC_CNT)
242 diff = MAX_TX_DESC_CNT;
244 return (bp->tx_ring_size - diff);
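/* The helpers below access chip registers indirectly through the PCI
 * configuration register window: the target offset is written to
 * BNX2_PCICFG_REG_WINDOW_ADDRESS and the data is then transferred through
 * BNX2_PCICFG_REG_WINDOW, serialized by indirect_lock.
 */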
248 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
252 spin_lock_bh(&bp->indirect_lock);
253 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
254 val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
255 spin_unlock_bh(&bp->indirect_lock);
260 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
262 spin_lock_bh(&bp->indirect_lock);
263 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
264 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
265 spin_unlock_bh(&bp->indirect_lock);
269 bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
271 bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
275 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
277 return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
281 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
284 spin_lock_bh(&bp->indirect_lock);
285 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
288 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
289 REG_WR(bp, BNX2_CTX_CTX_CTRL,
290 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
291 for (i = 0; i < 5; i++) {
293 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
294 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
299 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
300 REG_WR(bp, BNX2_CTX_DATA, val);
302 spin_unlock_bh(&bp->indirect_lock);
306 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
311 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
312 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
313 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
315 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
316 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
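	/* Compose the MDIO command word: PHY address (<< 21), register number
	 * (<< 16) and the READ opcode; START_BUSY kicks off the transaction
	 * and is polled below until the chip clears it.
	 */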
321 val1 = (bp->phy_addr << 21) | (reg << 16) |
322 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
323 BNX2_EMAC_MDIO_COMM_START_BUSY;
324 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
326 for (i = 0; i < 50; i++) {
329 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
330 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
333 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
334 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
340 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
349 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
350 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
351 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
353 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
354 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
363 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
368 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
369 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
370 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
372 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
373 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
378 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
379 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
380 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
381 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
383 for (i = 0; i < 50; i++) {
386 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
387 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
393 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
398 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
399 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
400 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
402 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
403 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
412 bnx2_disable_int(struct bnx2 *bp)
415 struct bnx2_napi *bnapi;
417 for (i = 0; i < bp->irq_nvecs; i++) {
418 bnapi = &bp->bnx2_napi[i];
419 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
420 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
422 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
426 bnx2_enable_int(struct bnx2 *bp)
429 struct bnx2_napi *bnapi;
431 for (i = 0; i < bp->irq_nvecs; i++) {
432 bnapi = &bp->bnx2_napi[i];
434 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
435 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
436 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
437 bnapi->last_status_idx);
439 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
440 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
441 bnapi->last_status_idx);
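	/* COAL_NOW asks the host coalescing block to generate an interrupt
	 * immediately, presumably so that events posted while interrupts were
	 * masked are not lost.
	 */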
443 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
447 bnx2_disable_int_sync(struct bnx2 *bp)
451 atomic_inc(&bp->intr_sem);
452 bnx2_disable_int(bp);
453 for (i = 0; i < bp->irq_nvecs; i++)
454 synchronize_irq(bp->irq_tbl[i].vector);
458 bnx2_napi_disable(struct bnx2 *bp)
462 for (i = 0; i < bp->irq_nvecs; i++)
463 napi_disable(&bp->bnx2_napi[i].napi);
467 bnx2_napi_enable(struct bnx2 *bp)
471 for (i = 0; i < bp->irq_nvecs; i++)
472 napi_enable(&bp->bnx2_napi[i].napi);
476 bnx2_netif_stop(struct bnx2 *bp)
478 bnx2_disable_int_sync(bp);
479 if (netif_running(bp->dev)) {
480 bnx2_napi_disable(bp);
481 netif_tx_disable(bp->dev);
482 bp->dev->trans_start = jiffies; /* prevent tx timeout */
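/* bnx2_disable_int_sync() increments intr_sem; bnx2_netif_start() below only
 * re-enables the queue and NAPI once the matching decrement brings intr_sem
 * back to zero, so nested stop/start pairs compose correctly.
 */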
487 bnx2_netif_start(struct bnx2 *bp)
489 if (atomic_dec_and_test(&bp->intr_sem)) {
490 if (netif_running(bp->dev)) {
491 netif_wake_queue(bp->dev);
492 bnx2_napi_enable(bp);
499 bnx2_free_mem(struct bnx2 *bp)
503 for (i = 0; i < bp->ctx_pages; i++) {
504 if (bp->ctx_blk[i]) {
505 pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
507 bp->ctx_blk_mapping[i]);
508 bp->ctx_blk[i] = NULL;
511 if (bp->status_blk) {
512 pci_free_consistent(bp->pdev, bp->status_stats_size,
513 bp->status_blk, bp->status_blk_mapping);
514 bp->status_blk = NULL;
515 bp->stats_blk = NULL;
517 if (bp->tx_desc_ring) {
518 pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
519 bp->tx_desc_ring, bp->tx_desc_mapping);
520 bp->tx_desc_ring = NULL;
522 kfree(bp->tx_buf_ring);
523 bp->tx_buf_ring = NULL;
524 for (i = 0; i < bp->rx_max_ring; i++) {
525 if (bp->rx_desc_ring[i])
526 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
528 bp->rx_desc_mapping[i]);
529 bp->rx_desc_ring[i] = NULL;
531 vfree(bp->rx_buf_ring);
532 bp->rx_buf_ring = NULL;
533 for (i = 0; i < bp->rx_max_pg_ring; i++) {
534 if (bp->rx_pg_desc_ring[i])
535 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
536 bp->rx_pg_desc_ring[i],
537 bp->rx_pg_desc_mapping[i]);
538 bp->rx_pg_desc_ring[i] = NULL;
541 vfree(bp->rx_pg_ring);
542 bp->rx_pg_ring = NULL;
546 bnx2_alloc_mem(struct bnx2 *bp)
548 int i, status_blk_size;
550 bp->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
551 if (bp->tx_buf_ring == NULL)
554 bp->tx_desc_ring = pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
555 &bp->tx_desc_mapping);
556 if (bp->tx_desc_ring == NULL)
559 bp->rx_buf_ring = vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
560 if (bp->rx_buf_ring == NULL)
563 memset(bp->rx_buf_ring, 0, SW_RXBD_RING_SIZE * bp->rx_max_ring);
565 for (i = 0; i < bp->rx_max_ring; i++) {
566 bp->rx_desc_ring[i] =
567 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
568 &bp->rx_desc_mapping[i]);
569 if (bp->rx_desc_ring[i] == NULL)
574 if (bp->rx_pg_ring_size) {
575 bp->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
577 if (bp->rx_pg_ring == NULL)
580 memset(bp->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
584 for (i = 0; i < bp->rx_max_pg_ring; i++) {
585 bp->rx_pg_desc_ring[i] =
586 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
587 &bp->rx_pg_desc_mapping[i]);
588 if (bp->rx_pg_desc_ring[i] == NULL)
593 /* Combine status and statistics blocks into one allocation. */
594 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
595 if (bp->flags & BNX2_FLAG_MSIX_CAP)
596 status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
597 BNX2_SBLK_MSIX_ALIGN_SIZE);
598 bp->status_stats_size = status_blk_size +
599 sizeof(struct statistics_block);
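	/* Resulting layout of the single DMA buffer: the status block(s)
	 * (one per vector when MSIX_CAP is set) occupy the first
	 * status_blk_size bytes and the statistics block follows; the
	 * stats_blk pointer and mapping are derived from the same buffer
	 * further down.
	 */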
601 bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
602 &bp->status_blk_mapping);
603 if (bp->status_blk == NULL)
606 memset(bp->status_blk, 0, bp->status_stats_size);
608 bp->bnx2_napi[0].status_blk = bp->status_blk;
609 if (bp->flags & BNX2_FLAG_MSIX_CAP) {
610 for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
611 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
613 bnapi->status_blk_msix = (void *)
614 ((unsigned long) bp->status_blk +
615 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
616 bnapi->int_num = i << 24;
620 bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
623 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
625 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
626 bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
627 if (bp->ctx_pages == 0)
629 for (i = 0; i < bp->ctx_pages; i++) {
630 bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
632 &bp->ctx_blk_mapping[i]);
633 if (bp->ctx_blk[i] == NULL)
645 bnx2_report_fw_link(struct bnx2 *bp)
647 u32 fw_link_status = 0;
649 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
655 switch (bp->line_speed) {
657 if (bp->duplex == DUPLEX_HALF)
658 fw_link_status = BNX2_LINK_STATUS_10HALF;
660 fw_link_status = BNX2_LINK_STATUS_10FULL;
663 if (bp->duplex == DUPLEX_HALF)
664 fw_link_status = BNX2_LINK_STATUS_100HALF;
666 fw_link_status = BNX2_LINK_STATUS_100FULL;
669 if (bp->duplex == DUPLEX_HALF)
670 fw_link_status = BNX2_LINK_STATUS_1000HALF;
672 fw_link_status = BNX2_LINK_STATUS_1000FULL;
675 if (bp->duplex == DUPLEX_HALF)
676 fw_link_status = BNX2_LINK_STATUS_2500HALF;
678 fw_link_status = BNX2_LINK_STATUS_2500FULL;
682 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
685 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
687 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
688 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
690 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
691 bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
692 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
694 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
698 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
700 bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
704 bnx2_xceiver_str(struct bnx2 *bp)
706 return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
707 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
712 bnx2_report_link(struct bnx2 *bp)
715 netif_carrier_on(bp->dev);
716 printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
717 bnx2_xceiver_str(bp));
719 printk("%d Mbps ", bp->line_speed);
721 if (bp->duplex == DUPLEX_FULL)
722 printk("full duplex");
724 printk("half duplex");
727 if (bp->flow_ctrl & FLOW_CTRL_RX) {
728 printk(", receive ");
729 if (bp->flow_ctrl & FLOW_CTRL_TX)
730 printk("& transmit ");
733 printk(", transmit ");
735 printk("flow control ON");
740 netif_carrier_off(bp->dev);
741 printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
742 bnx2_xceiver_str(bp));
745 bnx2_report_fw_link(bp);
749 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
751 u32 local_adv, remote_adv;
754 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
755 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
757 if (bp->duplex == DUPLEX_FULL) {
758 bp->flow_ctrl = bp->req_flow_ctrl;
763 if (bp->duplex != DUPLEX_FULL) {
767 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
768 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
771 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
772 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
773 bp->flow_ctrl |= FLOW_CTRL_TX;
774 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
775 bp->flow_ctrl |= FLOW_CTRL_RX;
779 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
780 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
782 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
783 u32 new_local_adv = 0;
784 u32 new_remote_adv = 0;
786 if (local_adv & ADVERTISE_1000XPAUSE)
787 new_local_adv |= ADVERTISE_PAUSE_CAP;
788 if (local_adv & ADVERTISE_1000XPSE_ASYM)
789 new_local_adv |= ADVERTISE_PAUSE_ASYM;
790 if (remote_adv & ADVERTISE_1000XPAUSE)
791 new_remote_adv |= ADVERTISE_PAUSE_CAP;
792 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
793 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
795 local_adv = new_local_adv;
796 remote_adv = new_remote_adv;
799 /* See Table 28B-3 of 802.3ab-1999 spec. */
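	/* Summary of the resolution below: symmetric PAUSE advertised on both
	 * sides enables flow control in both directions; when one side only
	 * advertises asymmetric PAUSE, TX-only or RX-only pause results
	 * depending on what the link partner advertises.
	 */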
800 if (local_adv & ADVERTISE_PAUSE_CAP) {
801 if(local_adv & ADVERTISE_PAUSE_ASYM) {
802 if (remote_adv & ADVERTISE_PAUSE_CAP) {
803 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
805 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
806 bp->flow_ctrl = FLOW_CTRL_RX;
810 if (remote_adv & ADVERTISE_PAUSE_CAP) {
811 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
815 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
816 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
817 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
819 bp->flow_ctrl = FLOW_CTRL_TX;
825 bnx2_5709s_linkup(struct bnx2 *bp)
831 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
832 bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
833 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
835 if ((bp->autoneg & AUTONEG_SPEED) == 0) {
836 bp->line_speed = bp->req_line_speed;
837 bp->duplex = bp->req_duplex;
840 speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
842 case MII_BNX2_GP_TOP_AN_SPEED_10:
843 bp->line_speed = SPEED_10;
845 case MII_BNX2_GP_TOP_AN_SPEED_100:
846 bp->line_speed = SPEED_100;
848 case MII_BNX2_GP_TOP_AN_SPEED_1G:
849 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
850 bp->line_speed = SPEED_1000;
852 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
853 bp->line_speed = SPEED_2500;
856 if (val & MII_BNX2_GP_TOP_AN_FD)
857 bp->duplex = DUPLEX_FULL;
859 bp->duplex = DUPLEX_HALF;
864 bnx2_5708s_linkup(struct bnx2 *bp)
869 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
870 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
871 case BCM5708S_1000X_STAT1_SPEED_10:
872 bp->line_speed = SPEED_10;
874 case BCM5708S_1000X_STAT1_SPEED_100:
875 bp->line_speed = SPEED_100;
877 case BCM5708S_1000X_STAT1_SPEED_1G:
878 bp->line_speed = SPEED_1000;
880 case BCM5708S_1000X_STAT1_SPEED_2G5:
881 bp->line_speed = SPEED_2500;
884 if (val & BCM5708S_1000X_STAT1_FD)
885 bp->duplex = DUPLEX_FULL;
887 bp->duplex = DUPLEX_HALF;
893 bnx2_5706s_linkup(struct bnx2 *bp)
895 u32 bmcr, local_adv, remote_adv, common;
898 bp->line_speed = SPEED_1000;
900 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
901 if (bmcr & BMCR_FULLDPLX) {
902 bp->duplex = DUPLEX_FULL;
905 bp->duplex = DUPLEX_HALF;
908 if (!(bmcr & BMCR_ANENABLE)) {
912 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
913 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
915 common = local_adv & remote_adv;
916 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
918 if (common & ADVERTISE_1000XFULL) {
919 bp->duplex = DUPLEX_FULL;
922 bp->duplex = DUPLEX_HALF;
930 bnx2_copper_linkup(struct bnx2 *bp)
934 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
935 if (bmcr & BMCR_ANENABLE) {
936 u32 local_adv, remote_adv, common;
938 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
939 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
941 common = local_adv & (remote_adv >> 2);
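		/* The link partner's 1000BASE-T ability bits in MII_STAT1000 sit
		 * two bit positions above the local advertisement bits in
		 * MII_CTRL1000, so shifting right by 2 lines them up for the
		 * AND above.
		 */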
942 if (common & ADVERTISE_1000FULL) {
943 bp->line_speed = SPEED_1000;
944 bp->duplex = DUPLEX_FULL;
946 else if (common & ADVERTISE_1000HALF) {
947 bp->line_speed = SPEED_1000;
948 bp->duplex = DUPLEX_HALF;
951 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
952 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
954 common = local_adv & remote_adv;
955 if (common & ADVERTISE_100FULL) {
956 bp->line_speed = SPEED_100;
957 bp->duplex = DUPLEX_FULL;
959 else if (common & ADVERTISE_100HALF) {
960 bp->line_speed = SPEED_100;
961 bp->duplex = DUPLEX_HALF;
963 else if (common & ADVERTISE_10FULL) {
964 bp->line_speed = SPEED_10;
965 bp->duplex = DUPLEX_FULL;
967 else if (common & ADVERTISE_10HALF) {
968 bp->line_speed = SPEED_10;
969 bp->duplex = DUPLEX_HALF;
978 if (bmcr & BMCR_SPEED100) {
979 bp->line_speed = SPEED_100;
982 bp->line_speed = SPEED_10;
984 if (bmcr & BMCR_FULLDPLX) {
985 bp->duplex = DUPLEX_FULL;
988 bp->duplex = DUPLEX_HALF;
996 bnx2_init_rx_context0(struct bnx2 *bp)
998 u32 val, rx_cid_addr = GET_CID_ADDR(RX_CID);
1000 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
1001 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
1004 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1005 u32 lo_water, hi_water;
1007 if (bp->flow_ctrl & FLOW_CTRL_TX)
1008 lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
1010 lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
1011 if (lo_water >= bp->rx_ring_size)
1014 hi_water = bp->rx_ring_size / 4;
1016 if (hi_water <= lo_water)
1019 hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
1020 lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;
1024 else if (hi_water == 0)
1026 val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
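		/* The low/high water marks appear to control when the chip
		 * generates pause frames as free rx ring entries run low; they
		 * are programmed in units of the hardware scale factors used
		 * above.
		 */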
1028 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
1032 bnx2_set_mac_link(struct bnx2 *bp)
1036 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
1037 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
1038 (bp->duplex == DUPLEX_HALF)) {
1039 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
1042 /* Configure the EMAC mode register. */
1043 val = REG_RD(bp, BNX2_EMAC_MODE);
1045 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1046 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1047 BNX2_EMAC_MODE_25G_MODE);
1050 switch (bp->line_speed) {
1052 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
1053 val |= BNX2_EMAC_MODE_PORT_MII_10M;
1058 val |= BNX2_EMAC_MODE_PORT_MII;
1061 val |= BNX2_EMAC_MODE_25G_MODE;
1064 val |= BNX2_EMAC_MODE_PORT_GMII;
1069 val |= BNX2_EMAC_MODE_PORT_GMII;
1072 /* Set the MAC to operate in the appropriate duplex mode. */
1073 if (bp->duplex == DUPLEX_HALF)
1074 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
1075 REG_WR(bp, BNX2_EMAC_MODE, val);
1077 /* Enable/disable rx PAUSE. */
1078 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
1080 if (bp->flow_ctrl & FLOW_CTRL_RX)
1081 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
1082 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
1084 /* Enable/disable tx PAUSE. */
1085 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
1086 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
1088 if (bp->flow_ctrl & FLOW_CTRL_TX)
1089 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
1090 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
1092 /* Acknowledge the interrupt. */
1093 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
1095 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1096 bnx2_init_rx_context0(bp);
1102 bnx2_enable_bmsr1(struct bnx2 *bp)
1104 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1105 (CHIP_NUM(bp) == CHIP_NUM_5709))
1106 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1107 MII_BNX2_BLK_ADDR_GP_STATUS);
1111 bnx2_disable_bmsr1(struct bnx2 *bp)
1113 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1114 (CHIP_NUM(bp) == CHIP_NUM_5709))
1115 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1116 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1120 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1125 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1128 if (bp->autoneg & AUTONEG_SPEED)
1129 bp->advertising |= ADVERTISED_2500baseX_Full;
1131 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1132 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1134 bnx2_read_phy(bp, bp->mii_up1, &up1);
1135 if (!(up1 & BCM5708S_UP1_2G5)) {
1136 up1 |= BCM5708S_UP1_2G5;
1137 bnx2_write_phy(bp, bp->mii_up1, up1);
1141 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1142 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1143 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1149 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1154 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1157 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1158 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1160 bnx2_read_phy(bp, bp->mii_up1, &up1);
1161 if (up1 & BCM5708S_UP1_2G5) {
1162 up1 &= ~BCM5708S_UP1_2G5;
1163 bnx2_write_phy(bp, bp->mii_up1, up1);
1167 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1168 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1169 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1175 bnx2_enable_forced_2g5(struct bnx2 *bp)
1179 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1182 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1185 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1186 MII_BNX2_BLK_ADDR_SERDES_DIG);
1187 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1188 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1189 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1190 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1192 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1193 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1194 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1196 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1197 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1198 bmcr |= BCM5708S_BMCR_FORCE_2500;
1201 if (bp->autoneg & AUTONEG_SPEED) {
1202 bmcr &= ~BMCR_ANENABLE;
1203 if (bp->req_duplex == DUPLEX_FULL)
1204 bmcr |= BMCR_FULLDPLX;
1206 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1210 bnx2_disable_forced_2g5(struct bnx2 *bp)
1214 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1217 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1220 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1221 MII_BNX2_BLK_ADDR_SERDES_DIG);
1222 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1223 val &= ~MII_BNX2_SD_MISC1_FORCE;
1224 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1226 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1227 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1228 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1230 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1231 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1232 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1235 if (bp->autoneg & AUTONEG_SPEED)
1236 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1237 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1241 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1245 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1246 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1248 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1250 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1254 bnx2_set_link(struct bnx2 *bp)
1259 if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
1264 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1267 link_up = bp->link_up;
1269 bnx2_enable_bmsr1(bp);
1270 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1271 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1272 bnx2_disable_bmsr1(bp);
1274 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1275 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
1278 if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
1279 bnx2_5706s_force_link_dn(bp, 0);
1280 bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
1282 val = REG_RD(bp, BNX2_EMAC_STATUS);
1284 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
1285 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1286 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1288 if ((val & BNX2_EMAC_STATUS_LINK) &&
1289 !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
1290 bmsr |= BMSR_LSTATUS;
1292 bmsr &= ~BMSR_LSTATUS;
1295 if (bmsr & BMSR_LSTATUS) {
1298 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1299 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1300 bnx2_5706s_linkup(bp);
1301 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1302 bnx2_5708s_linkup(bp);
1303 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1304 bnx2_5709s_linkup(bp);
1307 bnx2_copper_linkup(bp);
1309 bnx2_resolve_flow_ctrl(bp);
1312 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1313 (bp->autoneg & AUTONEG_SPEED))
1314 bnx2_disable_forced_2g5(bp);
1316 if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
1319 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1320 bmcr |= BMCR_ANENABLE;
1321 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1323 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
1328 if (bp->link_up != link_up) {
1329 bnx2_report_link(bp);
1332 bnx2_set_mac_link(bp);
1338 bnx2_reset_phy(struct bnx2 *bp)
1343 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1345 #define PHY_RESET_MAX_WAIT 100
1346 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1349 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1350 if (!(reg & BMCR_RESET)) {
1355 if (i == PHY_RESET_MAX_WAIT) {
1362 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1366 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1367 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1369 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1370 adv = ADVERTISE_1000XPAUSE;
1373 adv = ADVERTISE_PAUSE_CAP;
1376 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1377 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1378 adv = ADVERTISE_1000XPSE_ASYM;
1381 adv = ADVERTISE_PAUSE_ASYM;
1384 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1385 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1386 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1389 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1395 static int bnx2_fw_sync(struct bnx2 *, u32, int);
1398 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1400 u32 speed_arg = 0, pause_adv;
1402 pause_adv = bnx2_phy_get_pause_adv(bp);
1404 if (bp->autoneg & AUTONEG_SPEED) {
1405 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1406 if (bp->advertising & ADVERTISED_10baseT_Half)
1407 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1408 if (bp->advertising & ADVERTISED_10baseT_Full)
1409 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1410 if (bp->advertising & ADVERTISED_100baseT_Half)
1411 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1412 if (bp->advertising & ADVERTISED_100baseT_Full)
1413 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1414 if (bp->advertising & ADVERTISED_1000baseT_Full)
1415 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1416 if (bp->advertising & ADVERTISED_2500baseX_Full)
1417 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1419 if (bp->req_line_speed == SPEED_2500)
1420 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1421 else if (bp->req_line_speed == SPEED_1000)
1422 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1423 else if (bp->req_line_speed == SPEED_100) {
1424 if (bp->req_duplex == DUPLEX_FULL)
1425 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1427 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1428 } else if (bp->req_line_speed == SPEED_10) {
1429 if (bp->req_duplex == DUPLEX_FULL)
1430 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1432 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1436 if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1437 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1438 if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
1439 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1441 if (port == PORT_TP)
1442 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1443 BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1445 bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
1447 spin_unlock_bh(&bp->phy_lock);
1448 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
1449 spin_lock_bh(&bp->phy_lock);
1455 bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
1460 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1461 return (bnx2_setup_remote_phy(bp, port));
1463 if (!(bp->autoneg & AUTONEG_SPEED)) {
1465 int force_link_down = 0;
1467 if (bp->req_line_speed == SPEED_2500) {
1468 if (!bnx2_test_and_enable_2g5(bp))
1469 force_link_down = 1;
1470 } else if (bp->req_line_speed == SPEED_1000) {
1471 if (bnx2_test_and_disable_2g5(bp))
1472 force_link_down = 1;
1474 bnx2_read_phy(bp, bp->mii_adv, &adv);
1475 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1477 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1478 new_bmcr = bmcr & ~BMCR_ANENABLE;
1479 new_bmcr |= BMCR_SPEED1000;
1481 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1482 if (bp->req_line_speed == SPEED_2500)
1483 bnx2_enable_forced_2g5(bp);
1484 else if (bp->req_line_speed == SPEED_1000) {
1485 bnx2_disable_forced_2g5(bp);
1486 new_bmcr &= ~0x2000;
1489 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1490 if (bp->req_line_speed == SPEED_2500)
1491 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1493 new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
1496 if (bp->req_duplex == DUPLEX_FULL) {
1497 adv |= ADVERTISE_1000XFULL;
1498 new_bmcr |= BMCR_FULLDPLX;
1501 adv |= ADVERTISE_1000XHALF;
1502 new_bmcr &= ~BMCR_FULLDPLX;
1504 if ((new_bmcr != bmcr) || (force_link_down)) {
1505 /* Force a link down visible on the other side */
1507 bnx2_write_phy(bp, bp->mii_adv, adv &
1508 ~(ADVERTISE_1000XFULL |
1509 ADVERTISE_1000XHALF));
1510 bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
1511 BMCR_ANRESTART | BMCR_ANENABLE);
1514 netif_carrier_off(bp->dev);
1515 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1516 bnx2_report_link(bp);
1518 bnx2_write_phy(bp, bp->mii_adv, adv);
1519 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1521 bnx2_resolve_flow_ctrl(bp);
1522 bnx2_set_mac_link(bp);
1527 bnx2_test_and_enable_2g5(bp);
1529 if (bp->advertising & ADVERTISED_1000baseT_Full)
1530 new_adv |= ADVERTISE_1000XFULL;
1532 new_adv |= bnx2_phy_get_pause_adv(bp);
1534 bnx2_read_phy(bp, bp->mii_adv, &adv);
1535 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1537 bp->serdes_an_pending = 0;
1538 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1539 /* Force a link down visible on the other side */
1541 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1542 spin_unlock_bh(&bp->phy_lock);
1544 spin_lock_bh(&bp->phy_lock);
1547 bnx2_write_phy(bp, bp->mii_adv, new_adv);
1548 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
1550 /* Speed up link-up time when the link partner
1551 * does not autonegotiate which is very common
1552 * in blade servers. Some blade servers use
1553 * IPMI for keyboard input and it's important
1554 * to minimize link disruptions. Autoneg. involves
1555 * exchanging base pages plus 3 next pages and
1556 * normally completes in about 120 msec.
	 */
1558 bp->current_interval = SERDES_AN_TIMEOUT;
1559 bp->serdes_an_pending = 1;
1560 mod_timer(&bp->timer, jiffies + bp->current_interval);
1562 bnx2_resolve_flow_ctrl(bp);
1563 bnx2_set_mac_link(bp);
1569 #define ETHTOOL_ALL_FIBRE_SPEED \
1570 (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ? \
1571 (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1572 (ADVERTISED_1000baseT_Full)
1574 #define ETHTOOL_ALL_COPPER_SPEED \
1575 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1576 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1577 ADVERTISED_1000baseT_Full)
1579 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1580 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1582 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1585 bnx2_set_default_remote_link(struct bnx2 *bp)
1589 if (bp->phy_port == PORT_TP)
1590 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1592 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1594 if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1595 bp->req_line_speed = 0;
1596 bp->autoneg |= AUTONEG_SPEED;
1597 bp->advertising = ADVERTISED_Autoneg;
1598 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1599 bp->advertising |= ADVERTISED_10baseT_Half;
1600 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1601 bp->advertising |= ADVERTISED_10baseT_Full;
1602 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1603 bp->advertising |= ADVERTISED_100baseT_Half;
1604 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1605 bp->advertising |= ADVERTISED_100baseT_Full;
1606 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1607 bp->advertising |= ADVERTISED_1000baseT_Full;
1608 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1609 bp->advertising |= ADVERTISED_2500baseX_Full;
1612 bp->advertising = 0;
1613 bp->req_duplex = DUPLEX_FULL;
1614 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1615 bp->req_line_speed = SPEED_10;
1616 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1617 bp->req_duplex = DUPLEX_HALF;
1619 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1620 bp->req_line_speed = SPEED_100;
1621 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1622 bp->req_duplex = DUPLEX_HALF;
1624 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1625 bp->req_line_speed = SPEED_1000;
1626 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1627 bp->req_line_speed = SPEED_2500;
1632 bnx2_set_default_link(struct bnx2 *bp)
1634 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1635 bnx2_set_default_remote_link(bp);
1639 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1640 bp->req_line_speed = 0;
1641 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1644 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1646 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1647 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1648 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1650 bp->req_line_speed = bp->line_speed = SPEED_1000;
1651 bp->req_duplex = DUPLEX_FULL;
1654 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1658 bnx2_send_heart_beat(struct bnx2 *bp)
1663 spin_lock(&bp->indirect_lock);
1664 msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1665 addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1666 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1667 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1668 spin_unlock(&bp->indirect_lock);
1672 bnx2_remote_phy_event(struct bnx2 *bp)
1675 u8 link_up = bp->link_up;
1678 msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
1680 if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1681 bnx2_send_heart_beat(bp);
1683 msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1685 if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1691 speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1692 bp->duplex = DUPLEX_FULL;
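	/* The *HALF cases below set half duplex and then deliberately fall
	 * through into the matching *FULL case to pick up the line speed.
	 */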
1694 case BNX2_LINK_STATUS_10HALF:
1695 bp->duplex = DUPLEX_HALF;
1696 case BNX2_LINK_STATUS_10FULL:
1697 bp->line_speed = SPEED_10;
1699 case BNX2_LINK_STATUS_100HALF:
1700 bp->duplex = DUPLEX_HALF;
1701 case BNX2_LINK_STATUS_100BASE_T4:
1702 case BNX2_LINK_STATUS_100FULL:
1703 bp->line_speed = SPEED_100;
1705 case BNX2_LINK_STATUS_1000HALF:
1706 bp->duplex = DUPLEX_HALF;
1707 case BNX2_LINK_STATUS_1000FULL:
1708 bp->line_speed = SPEED_1000;
1710 case BNX2_LINK_STATUS_2500HALF:
1711 bp->duplex = DUPLEX_HALF;
1712 case BNX2_LINK_STATUS_2500FULL:
1713 bp->line_speed = SPEED_2500;
1721 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1722 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1723 if (bp->duplex == DUPLEX_FULL)
1724 bp->flow_ctrl = bp->req_flow_ctrl;
1726 if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
1727 bp->flow_ctrl |= FLOW_CTRL_TX;
1728 if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
1729 bp->flow_ctrl |= FLOW_CTRL_RX;
1732 old_port = bp->phy_port;
1733 if (msg & BNX2_LINK_STATUS_SERDES_LINK)
1734 bp->phy_port = PORT_FIBRE;
1736 bp->phy_port = PORT_TP;
1738 if (old_port != bp->phy_port)
1739 bnx2_set_default_link(bp);
1742 if (bp->link_up != link_up)
1743 bnx2_report_link(bp);
1745 bnx2_set_mac_link(bp);
1749 bnx2_set_remote_link(struct bnx2 *bp)
1753 evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
1755 case BNX2_FW_EVT_CODE_LINK_EVENT:
1756 bnx2_remote_phy_event(bp);
1758 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1760 bnx2_send_heart_beat(bp);
1767 bnx2_setup_copper_phy(struct bnx2 *bp)
1772 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1774 if (bp->autoneg & AUTONEG_SPEED) {
1775 u32 adv_reg, adv1000_reg;
1776 u32 new_adv_reg = 0;
1777 u32 new_adv1000_reg = 0;
1779 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
1780 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1781 ADVERTISE_PAUSE_ASYM);
1783 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1784 adv1000_reg &= PHY_ALL_1000_SPEED;
1786 if (bp->advertising & ADVERTISED_10baseT_Half)
1787 new_adv_reg |= ADVERTISE_10HALF;
1788 if (bp->advertising & ADVERTISED_10baseT_Full)
1789 new_adv_reg |= ADVERTISE_10FULL;
1790 if (bp->advertising & ADVERTISED_100baseT_Half)
1791 new_adv_reg |= ADVERTISE_100HALF;
1792 if (bp->advertising & ADVERTISED_100baseT_Full)
1793 new_adv_reg |= ADVERTISE_100FULL;
1794 if (bp->advertising & ADVERTISED_1000baseT_Full)
1795 new_adv1000_reg |= ADVERTISE_1000FULL;
1797 new_adv_reg |= ADVERTISE_CSMA;
1799 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1801 if ((adv1000_reg != new_adv1000_reg) ||
1802 (adv_reg != new_adv_reg) ||
1803 ((bmcr & BMCR_ANENABLE) == 0)) {
1805 bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
1806 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
1807 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
1810 else if (bp->link_up) {
1811 /* Flow ctrl may have changed from auto to forced */
1812 /* or vice-versa. */
1814 bnx2_resolve_flow_ctrl(bp);
1815 bnx2_set_mac_link(bp);
1821 if (bp->req_line_speed == SPEED_100) {
1822 new_bmcr |= BMCR_SPEED100;
1824 if (bp->req_duplex == DUPLEX_FULL) {
1825 new_bmcr |= BMCR_FULLDPLX;
1827 if (new_bmcr != bmcr) {
1830 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1831 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1833 if (bmsr & BMSR_LSTATUS) {
1834 /* Force link down */
1835 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1836 spin_unlock_bh(&bp->phy_lock);
1838 spin_lock_bh(&bp->phy_lock);
1840 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1841 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1844 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1846 /* Normally, the new speed is set up after the link has
1847 * gone down and up again. In some cases, link will not go
1848 * down so we need to set up the new speed here.
		 */
1850 if (bmsr & BMSR_LSTATUS) {
1851 bp->line_speed = bp->req_line_speed;
1852 bp->duplex = bp->req_duplex;
1853 bnx2_resolve_flow_ctrl(bp);
1854 bnx2_set_mac_link(bp);
1857 bnx2_resolve_flow_ctrl(bp);
1858 bnx2_set_mac_link(bp);
1864 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1866 if (bp->loopback == MAC_LOOPBACK)
1869 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1870 return (bnx2_setup_serdes_phy(bp, port));
1873 return (bnx2_setup_copper_phy(bp));
1878 bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
1882 bp->mii_bmcr = MII_BMCR + 0x10;
1883 bp->mii_bmsr = MII_BMSR + 0x10;
1884 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
1885 bp->mii_adv = MII_ADVERTISE + 0x10;
1886 bp->mii_lpa = MII_LPA + 0x10;
1887 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
1889 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
1890 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
1892 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1896 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
1898 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
1899 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
1900 val |= MII_BNX2_SD_1000XCTL1_FIBER;
1901 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
1903 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1904 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
1905 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
1906 val |= BCM5708S_UP1_2G5;
1908 val &= ~BCM5708S_UP1_2G5;
1909 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
1911 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
1912 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
1913 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
1914 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
1916 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
1918 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
1919 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
1920 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
1922 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1928 bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
1935 bp->mii_up1 = BCM5708S_UP1;
1937 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1938 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1939 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1941 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1942 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1943 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1945 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1946 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1947 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1949 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
1950 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1951 val |= BCM5708S_UP1_2G5;
1952 bnx2_write_phy(bp, BCM5708S_UP1, val);
1955 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
1956 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1957 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
1958 /* increase tx signal amplitude */
1959 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1960 BCM5708S_BLK_ADDR_TX_MISC);
1961 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1962 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1963 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1964 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1967 val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
1968 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1973 is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
1974 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1975 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1976 BCM5708S_BLK_ADDR_TX_MISC);
1977 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1978 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1979 BCM5708S_BLK_ADDR_DIG);
1986 bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
1991 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
1993 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1994 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
1996 if (bp->dev->mtu > 1500) {
1999 /* Set extended packet length bit */
2000 bnx2_write_phy(bp, 0x18, 0x7);
2001 bnx2_read_phy(bp, 0x18, &val);
2002 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
2004 bnx2_write_phy(bp, 0x1c, 0x6c00);
2005 bnx2_read_phy(bp, 0x1c, &val);
2006 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
2011 bnx2_write_phy(bp, 0x18, 0x7);
2012 bnx2_read_phy(bp, 0x18, &val);
2013 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2015 bnx2_write_phy(bp, 0x1c, 0x6c00);
2016 bnx2_read_phy(bp, 0x1c, &val);
2017 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
2024 bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
2031 if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
2032 bnx2_write_phy(bp, 0x18, 0x0c00);
2033 bnx2_write_phy(bp, 0x17, 0x000a);
2034 bnx2_write_phy(bp, 0x15, 0x310b);
2035 bnx2_write_phy(bp, 0x17, 0x201f);
2036 bnx2_write_phy(bp, 0x15, 0x9506);
2037 bnx2_write_phy(bp, 0x17, 0x401f);
2038 bnx2_write_phy(bp, 0x15, 0x14e2);
2039 bnx2_write_phy(bp, 0x18, 0x0400);
2042 if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
2043 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
2044 MII_BNX2_DSP_EXPAND_REG | 0x8);
2045 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
2047 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
2050 if (bp->dev->mtu > 1500) {
2051 /* Set extended packet length bit */
2052 bnx2_write_phy(bp, 0x18, 0x7);
2053 bnx2_read_phy(bp, 0x18, &val);
2054 bnx2_write_phy(bp, 0x18, val | 0x4000);
2056 bnx2_read_phy(bp, 0x10, &val);
2057 bnx2_write_phy(bp, 0x10, val | 0x1);
2060 bnx2_write_phy(bp, 0x18, 0x7);
2061 bnx2_read_phy(bp, 0x18, &val);
2062 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2064 bnx2_read_phy(bp, 0x10, &val);
2065 bnx2_write_phy(bp, 0x10, val & ~0x1);
2068 /* ethernet@wirespeed */
2069 bnx2_write_phy(bp, 0x18, 0x7007);
2070 bnx2_read_phy(bp, 0x18, &val);
2071 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
2077 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2082 bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2083 bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2085 bp->mii_bmcr = MII_BMCR;
2086 bp->mii_bmsr = MII_BMSR;
2087 bp->mii_bmsr1 = MII_BMSR;
2088 bp->mii_adv = MII_ADVERTISE;
2089 bp->mii_lpa = MII_LPA;
2091 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2093 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2096 bnx2_read_phy(bp, MII_PHYSID1, &val);
2097 bp->phy_id = val << 16;
2098 bnx2_read_phy(bp, MII_PHYSID2, &val);
2099 bp->phy_id |= val & 0xffff;
2101 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2102 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2103 rc = bnx2_init_5706s_phy(bp, reset_phy);
2104 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2105 rc = bnx2_init_5708s_phy(bp, reset_phy);
2106 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2107 rc = bnx2_init_5709s_phy(bp, reset_phy);
2110 rc = bnx2_init_copper_phy(bp, reset_phy);
2115 rc = bnx2_setup_phy(bp, bp->phy_port);
2121 bnx2_set_mac_loopback(struct bnx2 *bp)
2125 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2126 mac_mode &= ~BNX2_EMAC_MODE_PORT;
2127 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2128 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2133 static int bnx2_test_link(struct bnx2 *);
2136 bnx2_set_phy_loopback(struct bnx2 *bp)
2141 spin_lock_bh(&bp->phy_lock);
2142 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2144 spin_unlock_bh(&bp->phy_lock);
2148 for (i = 0; i < 10; i++) {
2149 if (bnx2_test_link(bp) == 0)
2154 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2155 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2156 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2157 BNX2_EMAC_MODE_25G_MODE);
2159 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2160 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2166 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
2172 msg_data |= bp->fw_wr_seq;
2174 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2176 /* wait for an acknowledgement. */
2177 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
2180 val = bnx2_shmem_rd(bp, BNX2_FW_MB);
2182 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2185 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2188 /* If we timed out, inform the firmware that this is the case. */
2189 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2191 printk(KERN_ERR PFX "fw sync timeout, reset code = "
2194 msg_data &= ~BNX2_DRV_MSG_CODE;
2195 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2197 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2202 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2209 bnx2_init_5709_context(struct bnx2 *bp)
2214 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2215 val |= (BCM_PAGE_BITS - 8) << 16;
2216 REG_WR(bp, BNX2_CTX_COMMAND, val);
2217 for (i = 0; i < 10; i++) {
2218 val = REG_RD(bp, BNX2_CTX_COMMAND);
2219 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2223 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2226 for (i = 0; i < bp->ctx_pages; i++) {
2230 memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
2234 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2235 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2236 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2237 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2238 (u64) bp->ctx_blk_mapping[i] >> 32);
2239 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2240 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2241 for (j = 0; j < 10; j++) {
2243 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2244 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2248 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2257 bnx2_init_context(struct bnx2 *bp)
2263 u32 vcid_addr, pcid_addr, offset;
2268 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2271 vcid_addr = GET_PCID_ADDR(vcid);
2273 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2278 pcid_addr = GET_PCID_ADDR(new_vcid);
2281 vcid_addr = GET_CID_ADDR(vcid);
2282 pcid_addr = vcid_addr;
2285 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2286 vcid_addr += (i << PHY_CTX_SHIFT);
2287 pcid_addr += (i << PHY_CTX_SHIFT);
2289 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2290 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2292 /* Zero out the context. */
2293 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2294 bnx2_ctx_wr(bp, vcid_addr, offset, 0);
2300 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2306 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2307 if (good_mbuf == NULL) {
2308 printk(KERN_ERR PFX "Failed to allocate memory in "
2309 "bnx2_alloc_bad_rbuf\n");
2313 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2314 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2318 /* Allocate a bunch of mbufs and save the good ones in an array. */
2319 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2320 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2321 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2322 BNX2_RBUF_COMMAND_ALLOC_REQ);
2324 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2326 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2328 /* The addresses with Bit 9 set are bad memory blocks. */
2329 if (!(val & (1 << 9))) {
2330 good_mbuf[good_mbuf_cnt] = (u16) val;
2334 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2337 /* Free the good ones back to the mbuf pool, thus discarding
2338 * all the bad ones. */
2339 while (good_mbuf_cnt) {
2342 val = good_mbuf[good_mbuf_cnt];
2343 val = (val << 9) | val | 1;
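		/* Re-encode the mbuf handle for the free command; the hardware
		 * appears to expect the value in both halves of the word plus a
		 * valid bit in bit 0.
		 */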
2345 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2352 bnx2_set_mac_addr(struct bnx2 *bp)
2355 u8 *mac_addr = bp->dev->dev_addr;
2357 val = (mac_addr[0] << 8) | mac_addr[1];
2359 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2361 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2362 (mac_addr[4] << 8) | mac_addr[5];
2364 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2368 bnx2_alloc_rx_page(struct bnx2 *bp, u16 index)
2371 struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
2372 struct rx_bd *rxbd =
2373 &bp->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2374 struct page *page = alloc_page(GFP_ATOMIC);
2378 mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2379 PCI_DMA_FROMDEVICE);
2381 pci_unmap_addr_set(rx_pg, mapping, mapping);
2382 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2383 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2388 bnx2_free_rx_page(struct bnx2 *bp, u16 index)
2390 struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
2391 struct page *page = rx_pg->page;
2396 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2397 PCI_DMA_FROMDEVICE);
2404 bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, u16 index)
2406 struct sk_buff *skb;
2407 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
2409 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2410 unsigned long align;
2412 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
2417 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2418 skb_reserve(skb, BNX2_RX_ALIGN - align);
2420 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2421 PCI_DMA_FROMDEVICE);
2424 pci_unmap_addr_set(rx_buf, mapping, mapping);
2426 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2427 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2429 bnapi->rx_prod_bseq += bp->rx_buf_use_size;
2435 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2437 struct status_block *sblk = bnapi->status_blk;
2438 u32 new_link_state, old_link_state;
2441 new_link_state = sblk->status_attn_bits & event;
2442 old_link_state = sblk->status_attn_bits_ack & event;
2443 if (new_link_state != old_link_state) {
2445 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2447 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2455 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2457 spin_lock(&bp->phy_lock);
2459 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2461 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2462 bnx2_set_remote_link(bp);
2464 spin_unlock(&bp->phy_lock);
2469 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2473 if (bnapi->int_num == 0)
2474 cons = bnapi->status_blk->status_tx_quick_consumer_index0;
2476 cons = bnapi->status_blk_msix->status_tx_quick_consumer_index;
2478 if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2484 bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2486 u16 hw_cons, sw_cons, sw_ring_cons;
2489 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2490 sw_cons = bnapi->tx_cons;
2492 while (sw_cons != hw_cons) {
2493 struct sw_bd *tx_buf;
2494 struct sk_buff *skb;
2497 sw_ring_cons = TX_RING_IDX(sw_cons);
2499 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
2502 /* partial BD completions possible with TSO packets */
2503 if (skb_is_gso(skb)) {
2504 u16 last_idx, last_ring_idx;
2506 last_idx = sw_cons +
2507 skb_shinfo(skb)->nr_frags + 1;
2508 last_ring_idx = sw_ring_cons +
2509 skb_shinfo(skb)->nr_frags + 1;
2510 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2513 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2518 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
2519 skb_headlen(skb), PCI_DMA_TODEVICE);
2522 last = skb_shinfo(skb)->nr_frags;
2524 for (i = 0; i < last; i++) {
2525 sw_cons = NEXT_TX_BD(sw_cons);
2527 pci_unmap_page(bp->pdev,
2529 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
2531 skb_shinfo(skb)->frags[i].size,
2535 sw_cons = NEXT_TX_BD(sw_cons);
2539 if (tx_pkt == budget)
2542 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2545 bnapi->hw_tx_cons = hw_cons;
2546 bnapi->tx_cons = sw_cons;
2547 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2548 * before checking for netif_queue_stopped(). Without the
2549 * memory barrier, there is a small possibility that bnx2_start_xmit()
2550 * will miss it and cause the queue to be stopped forever.
2554 if (unlikely(netif_queue_stopped(bp->dev)) &&
2555 (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh)) {
2556 netif_tx_lock(bp->dev);
2557 if ((netif_queue_stopped(bp->dev)) &&
2558 (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh))
2559 netif_wake_queue(bp->dev);
2560 netif_tx_unlock(bp->dev);
2566 bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_napi *bnapi,
2567 struct sk_buff *skb, int count)
2569 struct sw_pg *cons_rx_pg, *prod_rx_pg;
2570 struct rx_bd *cons_bd, *prod_bd;
2573 u16 hw_prod = bnapi->rx_pg_prod, prod;
2574 u16 cons = bnapi->rx_pg_cons;
2576 for (i = 0; i < count; i++) {
2577 prod = RX_PG_RING_IDX(hw_prod);
2579 prod_rx_pg = &bp->rx_pg_ring[prod];
2580 cons_rx_pg = &bp->rx_pg_ring[cons];
2581 cons_bd = &bp->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2582 prod_bd = &bp->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2584 if (i == 0 && skb) {
2586 struct skb_shared_info *shinfo;
2588 shinfo = skb_shinfo(skb);
2590 page = shinfo->frags[shinfo->nr_frags].page;
2591 shinfo->frags[shinfo->nr_frags].page = NULL;
2592 mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2593 PCI_DMA_FROMDEVICE);
2594 cons_rx_pg->page = page;
2595 pci_unmap_addr_set(cons_rx_pg, mapping, mapping);
2599 prod_rx_pg->page = cons_rx_pg->page;
2600 cons_rx_pg->page = NULL;
2601 pci_unmap_addr_set(prod_rx_pg, mapping,
2602 pci_unmap_addr(cons_rx_pg, mapping));
2604 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2605 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2608 cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
2609 hw_prod = NEXT_RX_BD(hw_prod);
2611 bnapi->rx_pg_prod = hw_prod;
2612 bnapi->rx_pg_cons = cons;
2616 bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
2619 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2620 struct rx_bd *cons_bd, *prod_bd;
2622 cons_rx_buf = &bp->rx_buf_ring[cons];
2623 prod_rx_buf = &bp->rx_buf_ring[prod];
2625 pci_dma_sync_single_for_device(bp->pdev,
2626 pci_unmap_addr(cons_rx_buf, mapping),
2627 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2629 bnapi->rx_prod_bseq += bp->rx_buf_use_size;
2631 prod_rx_buf->skb = skb;
2636 pci_unmap_addr_set(prod_rx_buf, mapping,
2637 pci_unmap_addr(cons_rx_buf, mapping));
2639 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2640 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2641 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2642 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2646 bnx2_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
2647 unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
2651 u16 prod = ring_idx & 0xffff;
2653 err = bnx2_alloc_rx_skb(bp, bnapi, prod);
2654 if (unlikely(err)) {
2655 bnx2_reuse_rx_skb(bp, bnapi, skb, (u16) (ring_idx >> 16), prod);
2657 unsigned int raw_len = len + 4;
2658 int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
2660 bnx2_reuse_rx_skb_pages(bp, bnapi, NULL, pages);
2665 skb_reserve(skb, BNX2_RX_OFFSET);
2666 pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
2667 PCI_DMA_FROMDEVICE);
2673 unsigned int i, frag_len, frag_size, pages;
2674 struct sw_pg *rx_pg;
2675 u16 pg_cons = bnapi->rx_pg_cons;
2676 u16 pg_prod = bnapi->rx_pg_prod;
2678 frag_size = len + 4 - hdr_len;
2679 pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
2680 skb_put(skb, hdr_len);
2682 for (i = 0; i < pages; i++) {
2683 frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
2684 if (unlikely(frag_len <= 4)) {
2685 unsigned int tail = 4 - frag_len;
2687 bnapi->rx_pg_cons = pg_cons;
2688 bnapi->rx_pg_prod = pg_prod;
2689 bnx2_reuse_rx_skb_pages(bp, bnapi, NULL,
2696 &skb_shinfo(skb)->frags[i - 1];
2698 skb->data_len -= tail;
2699 skb->truesize -= tail;
2703 rx_pg = &bp->rx_pg_ring[pg_cons];
2705 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping),
2706 PAGE_SIZE, PCI_DMA_FROMDEVICE);
2711 skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
2714 err = bnx2_alloc_rx_page(bp, RX_PG_RING_IDX(pg_prod));
2715 if (unlikely(err)) {
2716 bnapi->rx_pg_cons = pg_cons;
2717 bnapi->rx_pg_prod = pg_prod;
2718 bnx2_reuse_rx_skb_pages(bp, bnapi, skb,
2723 frag_size -= frag_len;
2724 skb->data_len += frag_len;
2725 skb->truesize += frag_len;
2726 skb->len += frag_len;
2728 pg_prod = NEXT_RX_BD(pg_prod);
2729 pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
2731 bnapi->rx_pg_prod = pg_prod;
2732 bnapi->rx_pg_cons = pg_cons;
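
/*
 * Sketch of the fragment accounting above (not called by the driver),
 * assuming 4 KiB pages and a hypothetical 256-byte hdr_len: a 9014-byte
 * frame (len, excluding CRC) leaves frag_size = 9014 + 4 - 256 = 8762
 * bytes of paged data, i.e. 3 page fragments, with the last fragment
 * holding 8762 - 2 * 4096 = 570 bytes.  Only when the final remainder
 * would be 4 bytes or less (nothing but CRC) does the "frag_len <= 4"
 * branch above trim the tail instead of attaching another page.
 */
static inline unsigned int bnx2_rx_pg_count(unsigned int len,
					    unsigned int hdr_len)
{
	/* len + 4 accounts for the trailing CRC kept by the chip */
	return PAGE_ALIGN(len + 4 - hdr_len) >> PAGE_SHIFT;
}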
2738 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
2740 u16 cons = bnapi->status_blk->status_rx_quick_consumer_index0;
2742 if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
2748 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2750 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2751 struct l2_fhdr *rx_hdr;
2752 int rx_pkt = 0, pg_ring_used = 0;
2754 hw_cons = bnx2_get_hw_rx_cons(bnapi);
2755 sw_cons = bnapi->rx_cons;
2756 sw_prod = bnapi->rx_prod;
2758 /* Memory barrier necessary as speculative reads of the rx
2759 * buffer can be ahead of the index in the status block
2762 while (sw_cons != hw_cons) {
2763 unsigned int len, hdr_len;
2765 struct sw_bd *rx_buf;
2766 struct sk_buff *skb;
2767 dma_addr_t dma_addr;
2769 sw_ring_cons = RX_RING_IDX(sw_cons);
2770 sw_ring_prod = RX_RING_IDX(sw_prod);
2772 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
2777 dma_addr = pci_unmap_addr(rx_buf, mapping);
2779 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
2780 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
2781 PCI_DMA_FROMDEVICE);
2783 rx_hdr = (struct l2_fhdr *) skb->data;
2784 len = rx_hdr->l2_fhdr_pkt_len;
2786 if ((status = rx_hdr->l2_fhdr_status) &
2787 (L2_FHDR_ERRORS_BAD_CRC |
2788 L2_FHDR_ERRORS_PHY_DECODE |
2789 L2_FHDR_ERRORS_ALIGNMENT |
2790 L2_FHDR_ERRORS_TOO_SHORT |
2791 L2_FHDR_ERRORS_GIANT_FRAME)) {
2793 bnx2_reuse_rx_skb(bp, bnapi, skb, sw_ring_cons,
2798 if (status & L2_FHDR_STATUS_SPLIT) {
2799 hdr_len = rx_hdr->l2_fhdr_ip_xsum;
2801 } else if (len > bp->rx_jumbo_thresh) {
2802 hdr_len = bp->rx_jumbo_thresh;
2808 if (len <= bp->rx_copy_thresh) {
2809 struct sk_buff *new_skb;
2811 new_skb = netdev_alloc_skb(bp->dev, len + 2);
2812 if (new_skb == NULL) {
2813 bnx2_reuse_rx_skb(bp, bnapi, skb, sw_ring_cons,
2819 skb_copy_from_linear_data_offset(skb,
2821 new_skb->data, len + 2);
2822 skb_reserve(new_skb, 2);
2823 skb_put(new_skb, len);
2825 bnx2_reuse_rx_skb(bp, bnapi, skb,
2826 sw_ring_cons, sw_ring_prod);
2829 } else if (unlikely(bnx2_rx_skb(bp, bnapi, skb, len, hdr_len,
2830 dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
2833 skb->protocol = eth_type_trans(skb, bp->dev);
2835 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
2836 (ntohs(skb->protocol) != 0x8100)) {
2843 skb->ip_summed = CHECKSUM_NONE;
2845 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
2846 L2_FHDR_STATUS_UDP_DATAGRAM))) {
2848 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
2849 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
2850 skb->ip_summed = CHECKSUM_UNNECESSARY;
2854 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && bp->vlgrp) {
2855 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
2856 rx_hdr->l2_fhdr_vlan_tag);
2860 netif_receive_skb(skb);
2862 bp->dev->last_rx = jiffies;
2866 sw_cons = NEXT_RX_BD(sw_cons);
2867 sw_prod = NEXT_RX_BD(sw_prod);
2869 if ((rx_pkt == budget))
2872 /* Refresh hw_cons to see if there is new work */
2873 if (sw_cons == hw_cons) {
2874 hw_cons = bnx2_get_hw_rx_cons(bnapi);
2878 bnapi->rx_cons = sw_cons;
2879 bnapi->rx_prod = sw_prod;
2882 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
2885 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
2887 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bnapi->rx_prod_bseq);
2895 /* MSI ISR - The only difference between this and the INTx ISR
2896 * is that the MSI interrupt is always serviced.
2899 bnx2_msi(int irq, void *dev_instance)
2901 struct net_device *dev = dev_instance;
2902 struct bnx2 *bp = netdev_priv(dev);
2903 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
2905 prefetch(bnapi->status_blk);
2906 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2907 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2908 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2910 /* Return here if interrupt is disabled. */
2911 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2914 netif_rx_schedule(dev, &bnapi->napi);
2920 bnx2_msi_1shot(int irq, void *dev_instance)
2922 struct net_device *dev = dev_instance;
2923 struct bnx2 *bp = netdev_priv(dev);
2924 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
2926 prefetch(bnapi->status_blk);
2928 /* Return here if interrupt is disabled. */
2929 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2932 netif_rx_schedule(dev, &bnapi->napi);
2938 bnx2_interrupt(int irq, void *dev_instance)
2940 struct net_device *dev = dev_instance;
2941 struct bnx2 *bp = netdev_priv(dev);
2942 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
2943 struct status_block *sblk = bnapi->status_blk;
2945 	/* When using INTx, it is possible for the interrupt to arrive
2946 	 * at the CPU before the status block write that was posted prior
2947 	 * to the interrupt.  Reading a register will flush the status block.
2948 * When using MSI, the MSI message will always complete after
2949 * the status block write.
2951 if ((sblk->status_idx == bnapi->last_status_idx) &&
2952 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
2953 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
2956 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2957 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2958 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2960 /* Read back to deassert IRQ immediately to avoid too many
2961 * spurious interrupts.
2963 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
2965 /* Return here if interrupt is shared and is disabled. */
2966 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2969 if (netif_rx_schedule_prep(dev, &bnapi->napi)) {
2970 bnapi->last_status_idx = sblk->status_idx;
2971 __netif_rx_schedule(dev, &bnapi->napi);
2978 bnx2_tx_msix(int irq, void *dev_instance)
2980 struct net_device *dev = dev_instance;
2981 struct bnx2 *bp = netdev_priv(dev);
2982 struct bnx2_napi *bnapi = &bp->bnx2_napi[BNX2_TX_VEC];
2984 prefetch(bnapi->status_blk_msix);
2986 /* Return here if interrupt is disabled. */
2987 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2990 netif_rx_schedule(dev, &bnapi->napi);
2994 #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
2995 STATUS_ATTN_BITS_TIMER_ABORT)
2998 bnx2_has_work(struct bnx2_napi *bnapi)
3000 struct status_block *sblk = bnapi->status_blk;
3002 if ((bnx2_get_hw_rx_cons(bnapi) != bnapi->rx_cons) ||
3003 (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons))
3006 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3007 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3013 static int bnx2_tx_poll(struct napi_struct *napi, int budget)
3015 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3016 struct bnx2 *bp = bnapi->bp;
3018 struct status_block_msix *sblk = bnapi->status_blk_msix;
3021 work_done += bnx2_tx_int(bp, bnapi, budget - work_done);
3022 if (unlikely(work_done >= budget))
3025 bnapi->last_status_idx = sblk->status_idx;
3027 } while (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons);
3029 netif_rx_complete(bp->dev, napi);
3030 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3031 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3032 bnapi->last_status_idx);
3036 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3037 int work_done, int budget)
3039 struct status_block *sblk = bnapi->status_blk;
3040 u32 status_attn_bits = sblk->status_attn_bits;
3041 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3043 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3044 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3046 bnx2_phy_int(bp, bnapi);
3048 /* This is needed to take care of transient status
3049 * during link changes.
3051 REG_WR(bp, BNX2_HC_COMMAND,
3052 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3053 REG_RD(bp, BNX2_HC_COMMAND);
3056 if (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons)
3057 bnx2_tx_int(bp, bnapi, 0);
3059 if (bnx2_get_hw_rx_cons(bnapi) != bnapi->rx_cons)
3060 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3065 static int bnx2_poll(struct napi_struct *napi, int budget)
3067 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3068 struct bnx2 *bp = bnapi->bp;
3070 struct status_block *sblk = bnapi->status_blk;
3073 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3075 if (unlikely(work_done >= budget))
3078 /* bnapi->last_status_idx is used below to tell the hw how
3079 * much work has been processed, so we must read it before
3080 * checking for more work.
3082 bnapi->last_status_idx = sblk->status_idx;
3084 if (likely(!bnx2_has_work(bnapi))) {
3085 netif_rx_complete(bp->dev, napi);
3086 if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3087 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3088 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3089 bnapi->last_status_idx);
3092 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3093 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3094 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3095 bnapi->last_status_idx);
3097 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3098 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3099 bnapi->last_status_idx);
3107 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3108 * from set_multicast.
3111 bnx2_set_rx_mode(struct net_device *dev)
3113 struct bnx2 *bp = netdev_priv(dev);
3114 u32 rx_mode, sort_mode;
3117 spin_lock_bh(&bp->phy_lock);
3119 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3120 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3121 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3123 if (!bp->vlgrp && !(bp->flags & BNX2_FLAG_ASF_ENABLE))
3124 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3126 if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
3127 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3129 if (dev->flags & IFF_PROMISC) {
3130 /* Promiscuous mode. */
3131 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3132 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3133 BNX2_RPM_SORT_USER0_PROM_VLAN;
3135 else if (dev->flags & IFF_ALLMULTI) {
3136 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3137 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3140 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3143 /* Accept one or more multicast(s). */
3144 struct dev_mc_list *mclist;
3145 u32 mc_filter[NUM_MC_HASH_REGISTERS];
3150 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3152 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
3153 i++, mclist = mclist->next) {
3155 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
3157 regidx = (bit & 0xe0) >> 5;
3159 mc_filter[regidx] |= (1 << bit);
3162 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3163 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3167 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3170 if (rx_mode != bp->rx_mode) {
3171 bp->rx_mode = rx_mode;
3172 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3175 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3176 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3177 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3179 spin_unlock_bh(&bp->phy_lock);
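
/*
 * Sketch of the multicast hash placement done in bnx2_set_rx_mode()
 * above (never called).  The intermediate step that reduces the
 * little-endian CRC to an 8-bit value was dropped from this listing;
 * taking the low CRC byte is an assumption here.  The visible code then
 * uses the top three bits as the hash-register index (8 registers) and
 * the low five bits as the bit position within that 32-bit register.
 */
static inline void bnx2_mc_hash_pos(const u8 *addr, u32 *regidx, u32 *bitpos)
{
	u32 bit = ether_crc_le(ETH_ALEN, addr) & 0xff;	/* assumed reduction */

	*regidx = (bit & 0xe0) >> 5;	/* which of the 8 hash registers */
	*bitpos = bit & 0x1f;		/* which bit within that register */
}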
3183 load_rv2p_fw(struct bnx2 *bp, __le32 *rv2p_code, u32 rv2p_code_len,
3189 if (rv2p_proc == RV2P_PROC2 && CHIP_NUM(bp) == CHIP_NUM_5709) {
3190 val = le32_to_cpu(rv2p_code[XI_RV2P_PROC2_MAX_BD_PAGE_LOC]);
3191 val &= ~XI_RV2P_PROC2_BD_PAGE_SIZE_MSK;
3192 val |= XI_RV2P_PROC2_BD_PAGE_SIZE;
3193 rv2p_code[XI_RV2P_PROC2_MAX_BD_PAGE_LOC] = cpu_to_le32(val);
3196 for (i = 0; i < rv2p_code_len; i += 8) {
3197 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, le32_to_cpu(*rv2p_code));
3199 REG_WR(bp, BNX2_RV2P_INSTR_LOW, le32_to_cpu(*rv2p_code));
3202 if (rv2p_proc == RV2P_PROC1) {
3203 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3204 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
3207 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3208 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
3212 	/* Reset the processor; un-stalling is done later. */
3213 if (rv2p_proc == RV2P_PROC1) {
3214 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3217 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
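
/*
 * Unused sketch of one iteration of the download loop above: each RV2P
 * instruction is 8 bytes, stored as two little-endian 32-bit words with
 * the high word first, and is committed to instruction slot
 * (byte_offset / 8) through the processor's ADDR_CMD register.  Shown
 * for PROC1; PROC2 uses the corresponding PROC2 register and flag.
 */
static void bnx2_rv2p_write_one(struct bnx2 *bp, u32 byte_offset,
				__le32 hi, __le32 lo)
{
	REG_WR(bp, BNX2_RV2P_INSTR_HIGH, le32_to_cpu(hi));
	REG_WR(bp, BNX2_RV2P_INSTR_LOW, le32_to_cpu(lo));
	REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD,
	       (byte_offset / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR);
}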
3222 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
3229 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3230 val |= cpu_reg->mode_value_halt;
3231 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3232 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3234 /* Load the Text area. */
3235 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
3239 rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
3244 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
3245 bnx2_reg_wr_ind(bp, offset, le32_to_cpu(fw->text[j]));
3249 /* Load the Data area. */
3250 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
3254 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
3255 bnx2_reg_wr_ind(bp, offset, fw->data[j]);
3259 /* Load the SBSS area. */
3260 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
3264 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
3265 bnx2_reg_wr_ind(bp, offset, 0);
3269 /* Load the BSS area. */
3270 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
3274 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
3275 bnx2_reg_wr_ind(bp, offset, 0);
3279 /* Load the Read-Only area. */
3280 offset = cpu_reg->spad_base +
3281 (fw->rodata_addr - cpu_reg->mips_view_base);
3285 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
3286 bnx2_reg_wr_ind(bp, offset, fw->rodata[j]);
3290 /* Clear the pre-fetch instruction. */
3291 bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3292 bnx2_reg_wr_ind(bp, cpu_reg->pc, fw->start_addr);
3294 /* Start the CPU. */
3295 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3296 val &= ~cpu_reg->mode_value_halt;
3297 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3298 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
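
/*
 * Sketch of the address translation used repeatedly in load_cpu_fw()
 * above (never called): a section's link-time MIPS address is turned
 * into an indirect-register offset inside the processor's scratchpad.
 * E.g. with spad_base = BNX2_RXP_SCRATCH, mips_view_base = 0x8000000 and
 * a hypothetical text_addr of 0x8000400, the text section is written
 * starting at BNX2_RXP_SCRATCH + 0x400.
 */
static inline u32 bnx2_fw_section_offset(const struct cpu_reg *cpu_reg,
					 u32 section_addr)
{
	return cpu_reg->spad_base + (section_addr - cpu_reg->mips_view_base);
}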
3304 bnx2_init_cpus(struct bnx2 *bp)
3306 struct cpu_reg cpu_reg;
3311 /* Initialize the RV2P processor. */
3312 text = vmalloc(FW_BUF_SIZE);
3315 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3316 rv2p = bnx2_xi_rv2p_proc1;
3317 rv2p_len = sizeof(bnx2_xi_rv2p_proc1);
3319 rv2p = bnx2_rv2p_proc1;
3320 rv2p_len = sizeof(bnx2_rv2p_proc1);
3322 rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3326 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);
3328 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3329 rv2p = bnx2_xi_rv2p_proc2;
3330 rv2p_len = sizeof(bnx2_xi_rv2p_proc2);
3332 rv2p = bnx2_rv2p_proc2;
3333 rv2p_len = sizeof(bnx2_rv2p_proc2);
3335 rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
3339 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);
3341 /* Initialize the RX Processor. */
3342 cpu_reg.mode = BNX2_RXP_CPU_MODE;
3343 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
3344 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
3345 cpu_reg.state = BNX2_RXP_CPU_STATE;
3346 cpu_reg.state_value_clear = 0xffffff;
3347 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
3348 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
3349 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
3350 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
3351 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
3352 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
3353 cpu_reg.mips_view_base = 0x8000000;
3355 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3356 fw = &bnx2_rxp_fw_09;
3358 fw = &bnx2_rxp_fw_06;
3361 rc = load_cpu_fw(bp, &cpu_reg, fw);
3365 /* Initialize the TX Processor. */
3366 cpu_reg.mode = BNX2_TXP_CPU_MODE;
3367 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
3368 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
3369 cpu_reg.state = BNX2_TXP_CPU_STATE;
3370 cpu_reg.state_value_clear = 0xffffff;
3371 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
3372 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
3373 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
3374 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
3375 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
3376 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
3377 cpu_reg.mips_view_base = 0x8000000;
3379 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3380 fw = &bnx2_txp_fw_09;
3382 fw = &bnx2_txp_fw_06;
3385 rc = load_cpu_fw(bp, &cpu_reg, fw);
3389 /* Initialize the TX Patch-up Processor. */
3390 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
3391 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
3392 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
3393 cpu_reg.state = BNX2_TPAT_CPU_STATE;
3394 cpu_reg.state_value_clear = 0xffffff;
3395 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
3396 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
3397 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
3398 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
3399 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
3400 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
3401 cpu_reg.mips_view_base = 0x8000000;
3403 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3404 fw = &bnx2_tpat_fw_09;
3406 fw = &bnx2_tpat_fw_06;
3409 rc = load_cpu_fw(bp, &cpu_reg, fw);
3413 /* Initialize the Completion Processor. */
3414 cpu_reg.mode = BNX2_COM_CPU_MODE;
3415 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
3416 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
3417 cpu_reg.state = BNX2_COM_CPU_STATE;
3418 cpu_reg.state_value_clear = 0xffffff;
3419 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
3420 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
3421 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
3422 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
3423 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
3424 cpu_reg.spad_base = BNX2_COM_SCRATCH;
3425 cpu_reg.mips_view_base = 0x8000000;
3427 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3428 fw = &bnx2_com_fw_09;
3430 fw = &bnx2_com_fw_06;
3433 rc = load_cpu_fw(bp, &cpu_reg, fw);
3437 /* Initialize the Command Processor. */
3438 cpu_reg.mode = BNX2_CP_CPU_MODE;
3439 cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
3440 cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
3441 cpu_reg.state = BNX2_CP_CPU_STATE;
3442 cpu_reg.state_value_clear = 0xffffff;
3443 cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
3444 cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
3445 cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
3446 cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
3447 cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
3448 cpu_reg.spad_base = BNX2_CP_SCRATCH;
3449 cpu_reg.mips_view_base = 0x8000000;
3451 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3452 fw = &bnx2_cp_fw_09;
3454 fw = &bnx2_cp_fw_06;
3457 rc = load_cpu_fw(bp, &cpu_reg, fw);
3465 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3469 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3475 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3476 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3477 PCI_PM_CTRL_PME_STATUS);
3479 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3480 /* delay required during transition out of D3hot */
3483 val = REG_RD(bp, BNX2_EMAC_MODE);
3484 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3485 val &= ~BNX2_EMAC_MODE_MPKT;
3486 REG_WR(bp, BNX2_EMAC_MODE, val);
3488 val = REG_RD(bp, BNX2_RPM_CONFIG);
3489 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3490 REG_WR(bp, BNX2_RPM_CONFIG, val);
3501 autoneg = bp->autoneg;
3502 advertising = bp->advertising;
3504 if (bp->phy_port == PORT_TP) {
3505 bp->autoneg = AUTONEG_SPEED;
3506 bp->advertising = ADVERTISED_10baseT_Half |
3507 ADVERTISED_10baseT_Full |
3508 ADVERTISED_100baseT_Half |
3509 ADVERTISED_100baseT_Full |
3513 spin_lock_bh(&bp->phy_lock);
3514 bnx2_setup_phy(bp, bp->phy_port);
3515 spin_unlock_bh(&bp->phy_lock);
3517 bp->autoneg = autoneg;
3518 bp->advertising = advertising;
3520 bnx2_set_mac_addr(bp);
3522 val = REG_RD(bp, BNX2_EMAC_MODE);
3524 /* Enable port mode. */
3525 val &= ~BNX2_EMAC_MODE_PORT;
3526 val |= BNX2_EMAC_MODE_MPKT_RCVD |
3527 BNX2_EMAC_MODE_ACPI_RCVD |
3528 BNX2_EMAC_MODE_MPKT;
3529 if (bp->phy_port == PORT_TP)
3530 val |= BNX2_EMAC_MODE_PORT_MII;
3532 val |= BNX2_EMAC_MODE_PORT_GMII;
3533 if (bp->line_speed == SPEED_2500)
3534 val |= BNX2_EMAC_MODE_25G_MODE;
3537 REG_WR(bp, BNX2_EMAC_MODE, val);
3539 /* receive all multicast */
3540 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3541 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3544 REG_WR(bp, BNX2_EMAC_RX_MODE,
3545 BNX2_EMAC_RX_MODE_SORT_MODE);
3547 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3548 BNX2_RPM_SORT_USER0_MC_EN;
3549 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3550 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3551 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3552 BNX2_RPM_SORT_USER0_ENA);
3554 /* Need to enable EMAC and RPM for WOL. */
3555 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3556 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3557 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3558 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3560 val = REG_RD(bp, BNX2_RPM_CONFIG);
3561 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3562 REG_WR(bp, BNX2_RPM_CONFIG, val);
3564 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3567 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3570 if (!(bp->flags & BNX2_FLAG_NO_WOL))
3571 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
3573 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3574 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3575 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3584 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3586 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3589 /* No more memory access after this point until
3590 * device is brought back to D0.
3602 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3607 /* Request access to the flash interface. */
3608 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3609 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3610 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3611 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3617 if (j >= NVRAM_TIMEOUT_COUNT)
3624 bnx2_release_nvram_lock(struct bnx2 *bp)
3629 /* Relinquish nvram interface. */
3630 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3632 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3633 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3634 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3640 if (j >= NVRAM_TIMEOUT_COUNT)
3648 bnx2_enable_nvram_write(struct bnx2 *bp)
3652 val = REG_RD(bp, BNX2_MISC_CFG);
3653 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3655 if (bp->flash_info->flags & BNX2_NV_WREN) {
3658 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3659 REG_WR(bp, BNX2_NVM_COMMAND,
3660 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3662 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3665 val = REG_RD(bp, BNX2_NVM_COMMAND);
3666 if (val & BNX2_NVM_COMMAND_DONE)
3670 if (j >= NVRAM_TIMEOUT_COUNT)
3677 bnx2_disable_nvram_write(struct bnx2 *bp)
3681 val = REG_RD(bp, BNX2_MISC_CFG);
3682 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3687 bnx2_enable_nvram_access(struct bnx2 *bp)
3691 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3692 /* Enable both bits, even on read. */
3693 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3694 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3698 bnx2_disable_nvram_access(struct bnx2 *bp)
3702 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3703 /* Disable both bits, even after read. */
3704 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3705 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3706 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3710 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3715 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
3716 /* Buffered flash, no erase needed */
3719 /* Build an erase command */
3720 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3721 BNX2_NVM_COMMAND_DOIT;
3723 /* Need to clear DONE bit separately. */
3724 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3727 	/* Address of the NVRAM page to erase. */
3727 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3729 /* Issue an erase command. */
3730 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3732 /* Wait for completion. */
3733 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3738 val = REG_RD(bp, BNX2_NVM_COMMAND);
3739 if (val & BNX2_NVM_COMMAND_DONE)
3743 if (j >= NVRAM_TIMEOUT_COUNT)
3750 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3755 /* Build the command word. */
3756 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3758 	/* Calculate the offset within a buffered flash; not needed for the 5709. */
3759 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3760 offset = ((offset / bp->flash_info->page_size) <<
3761 bp->flash_info->page_bits) +
3762 (offset % bp->flash_info->page_size);
3765 /* Need to clear DONE bit separately. */
3766 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3768 /* Address of the NVRAM to read from. */
3769 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3771 /* Issue a read command. */
3772 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3774 /* Wait for completion. */
3775 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3780 val = REG_RD(bp, BNX2_NVM_COMMAND);
3781 if (val & BNX2_NVM_COMMAND_DONE) {
3782 __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
3783 memcpy(ret_val, &v, 4);
3787 if (j >= NVRAM_TIMEOUT_COUNT)
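
/*
 * Unused sketch of the buffered-flash translation done in
 * bnx2_nvram_read_dword()/bnx2_nvram_write_dword(): the linear offset is
 * split into (page, byte-in-page) and the page number is re-packed using
 * the device's page address shift.  Assuming the 264-byte pages with
 * 9 page-address bits used by the buffered parts in flash_table,
 * offset 1000 maps to page 3, byte 208, i.e. (3 << 9) + 208 = 1744.
 */
static inline u32 bnx2_buffered_flash_offset(const struct flash_spec *flash,
					     u32 offset)
{
	return ((offset / flash->page_size) << flash->page_bits) +
	       (offset % flash->page_size);
}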
3795 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3801 /* Build the command word. */
3802 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3804 	/* Calculate the offset within a buffered flash; not needed for the 5709. */
3805 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
3806 offset = ((offset / bp->flash_info->page_size) <<
3807 bp->flash_info->page_bits) +
3808 (offset % bp->flash_info->page_size);
3811 /* Need to clear DONE bit separately. */
3812 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3814 memcpy(&val32, val, 4);
3816 /* Write the data. */
3817 REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
3819 /* Address of the NVRAM to write to. */
3820 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3822 /* Issue the write command. */
3823 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3825 /* Wait for completion. */
3826 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3829 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3832 if (j >= NVRAM_TIMEOUT_COUNT)
3839 bnx2_init_nvram(struct bnx2 *bp)
3842 int j, entry_count, rc = 0;
3843 struct flash_spec *flash;
3845 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3846 bp->flash_info = &flash_5709;
3847 goto get_flash_size;
3850 /* Determine the selected interface. */
3851 val = REG_RD(bp, BNX2_NVM_CFG1);
3853 entry_count = ARRAY_SIZE(flash_table);
3855 if (val & 0x40000000) {
3857 /* Flash interface has been reconfigured */
3858 for (j = 0, flash = &flash_table[0]; j < entry_count;
3860 if ((val & FLASH_BACKUP_STRAP_MASK) ==
3861 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
3862 bp->flash_info = flash;
3869 	/* Flash interface has not been reconfigured yet */
3871 if (val & (1 << 23))
3872 mask = FLASH_BACKUP_STRAP_MASK;
3874 mask = FLASH_STRAP_MASK;
3876 for (j = 0, flash = &flash_table[0]; j < entry_count;
3879 if ((val & mask) == (flash->strapping & mask)) {
3880 bp->flash_info = flash;
3882 /* Request access to the flash interface. */
3883 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3886 /* Enable access to flash interface */
3887 bnx2_enable_nvram_access(bp);
3889 /* Reconfigure the flash interface */
3890 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
3891 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
3892 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
3893 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
3895 /* Disable access to flash interface */
3896 bnx2_disable_nvram_access(bp);
3897 bnx2_release_nvram_lock(bp);
3902 } /* if (val & 0x40000000) */
3904 if (j == entry_count) {
3905 bp->flash_info = NULL;
3906 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
3911 val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
3912 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
3914 bp->flash_size = val;
3916 bp->flash_size = bp->flash_info->total_size;
3922 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3926 u32 cmd_flags, offset32, len32, extra;
3931 /* Request access to the flash interface. */
3932 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3935 /* Enable access to flash interface */
3936 bnx2_enable_nvram_access(bp);
3949 pre_len = 4 - (offset & 3);
3951 if (pre_len >= len32) {
3953 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3954 BNX2_NVM_COMMAND_LAST;
3957 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3960 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3965 memcpy(ret_buf, buf + (offset & 3), pre_len);
3972 extra = 4 - (len32 & 3);
3973 len32 = (len32 + 4) & ~3;
3980 cmd_flags = BNX2_NVM_COMMAND_LAST;
3982 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3983 BNX2_NVM_COMMAND_LAST;
3985 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3987 memcpy(ret_buf, buf, 4 - extra);
3989 else if (len32 > 0) {
3992 /* Read the first word. */
3996 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3998 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4000 /* Advance to the next dword. */
4005 while (len32 > 4 && rc == 0) {
4006 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4008 /* Advance to the next dword. */
4017 cmd_flags = BNX2_NVM_COMMAND_LAST;
4018 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4020 memcpy(ret_buf, buf, 4 - extra);
4023 /* Disable access to flash interface */
4024 bnx2_disable_nvram_access(bp);
4026 bnx2_release_nvram_lock(bp);
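
/*
 * Worked example of the alignment handling in bnx2_nvram_read() above
 * (sketch only): a 10-byte read starting at NVRAM offset 0x103 first
 * fetches the dword at 0x100 and keeps its last pre_len = 1 byte, then
 * rounds the remaining 9 bytes up to len32 = 12 so that only whole
 * dwords are read, discarding the final extra = 3 bytes after the last
 * dword has been copied out.
 */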
4032 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4035 u32 written, offset32, len32;
4036 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4038 int align_start, align_end;
4043 align_start = align_end = 0;
4045 if ((align_start = (offset32 & 3))) {
4047 len32 += align_start;
4050 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4055 align_end = 4 - (len32 & 3);
4057 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4061 if (align_start || align_end) {
4062 align_buf = kmalloc(len32, GFP_KERNEL);
4063 if (align_buf == NULL)
4066 memcpy(align_buf, start, 4);
4069 memcpy(align_buf + len32 - 4, end, 4);
4071 memcpy(align_buf + align_start, data_buf, buf_size);
4075 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4076 flash_buffer = kmalloc(264, GFP_KERNEL);
4077 if (flash_buffer == NULL) {
4079 goto nvram_write_end;
4084 while ((written < len32) && (rc == 0)) {
4085 u32 page_start, page_end, data_start, data_end;
4086 u32 addr, cmd_flags;
4089 /* Find the page_start addr */
4090 page_start = offset32 + written;
4091 page_start -= (page_start % bp->flash_info->page_size);
4092 /* Find the page_end addr */
4093 page_end = page_start + bp->flash_info->page_size;
4094 /* Find the data_start addr */
4095 data_start = (written == 0) ? offset32 : page_start;
4096 /* Find the data_end addr */
4097 data_end = (page_end > offset32 + len32) ?
4098 (offset32 + len32) : page_end;
4100 /* Request access to the flash interface. */
4101 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4102 goto nvram_write_end;
4104 /* Enable access to flash interface */
4105 bnx2_enable_nvram_access(bp);
4107 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4108 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4111 /* Read the whole page into the buffer
4112 			 * (non-buffered flash only) */
4113 for (j = 0; j < bp->flash_info->page_size; j += 4) {
4114 if (j == (bp->flash_info->page_size - 4)) {
4115 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4117 rc = bnx2_nvram_read_dword(bp,
4123 goto nvram_write_end;
4129 /* Enable writes to flash interface (unlock write-protect) */
4130 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4131 goto nvram_write_end;
4133 		/* Loop to write back the buffer data from page_start to data_start. */
4136 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4137 /* Erase the page */
4138 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4139 goto nvram_write_end;
4141 /* Re-enable the write again for the actual write */
4142 bnx2_enable_nvram_write(bp);
4144 for (addr = page_start; addr < data_start;
4145 addr += 4, i += 4) {
4147 rc = bnx2_nvram_write_dword(bp, addr,
4148 &flash_buffer[i], cmd_flags);
4151 goto nvram_write_end;
4157 /* Loop to write the new data from data_start to data_end */
4158 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4159 if ((addr == page_end - 4) ||
4160 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4161 (addr == data_end - 4))) {
4163 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4165 rc = bnx2_nvram_write_dword(bp, addr, buf,
4169 goto nvram_write_end;
4175 		/* Loop to write back the buffer data from data_end to page_end. */
4177 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4178 for (addr = data_end; addr < page_end;
4179 addr += 4, i += 4) {
4181 if (addr == page_end-4) {
4182 cmd_flags = BNX2_NVM_COMMAND_LAST;
4184 rc = bnx2_nvram_write_dword(bp, addr,
4185 &flash_buffer[i], cmd_flags);
4188 goto nvram_write_end;
4194 /* Disable writes to flash interface (lock write-protect) */
4195 bnx2_disable_nvram_write(bp);
4197 /* Disable access to flash interface */
4198 bnx2_disable_nvram_access(bp);
4199 bnx2_release_nvram_lock(bp);
4201 /* Increment written */
4202 written += data_end - data_start;
4206 kfree(flash_buffer);
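
/*
 * Sketch of the per-page windowing in the write loop above (never
 * called), assuming a 256-byte page: writing 20 bytes at offset 0x1F8
 * takes two passes, the first with page_start = 0x100,
 * data_start = 0x1F8 and data_end = page_end = 0x200 (8 bytes), the
 * second with page_start = data_start = 0x200 and data_end = 0x20C
 * (12 bytes).  On non-buffered parts each pass re-reads the page,
 * erases it, and rewrites the untouched bytes around the new data.
 */
static inline void bnx2_nvram_write_window(u32 page_size, u32 offset32,
					   u32 len32, u32 written,
					   u32 *data_start, u32 *data_end)
{
	u32 page_start = offset32 + written;
	u32 page_end;

	page_start -= page_start % page_size;
	page_end = page_start + page_size;

	*data_start = (written == 0) ? offset32 : page_start;
	*data_end = (page_end > offset32 + len32) ?
		    (offset32 + len32) : page_end;
}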
4212 bnx2_init_remote_phy(struct bnx2 *bp)
4216 bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4217 if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES))
4220 val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4221 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4224 if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
4225 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4227 val = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4228 if (val & BNX2_LINK_STATUS_SERDES_LINK)
4229 bp->phy_port = PORT_FIBRE;
4231 bp->phy_port = PORT_TP;
4233 if (netif_running(bp->dev)) {
4236 sig = BNX2_DRV_ACK_CAP_SIGNATURE |
4237 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4238 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4244 bnx2_setup_msix_tbl(struct bnx2 *bp)
4246 REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4248 REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4249 REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4253 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4259 /* Wait for the current PCI transaction to complete before
4260 * issuing a reset. */
4261 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4262 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4263 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4264 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4265 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4266 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4269 /* Wait for the firmware to tell us it is ok to issue a reset. */
4270 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
4272 /* Deposit a driver reset signature so the firmware knows that
4273 * this is a soft reset. */
4274 bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4275 BNX2_DRV_RESET_SIGNATURE_MAGIC);
4277 	/* Do a dummy read to force the chip to complete all current transactions
4278 * before we issue a reset. */
4279 val = REG_RD(bp, BNX2_MISC_ID);
4281 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4282 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4283 REG_RD(bp, BNX2_MISC_COMMAND);
4286 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4287 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4289 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
4292 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4293 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4294 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4297 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4299 /* Reading back any register after chip reset will hang the
4300 * bus on 5706 A0 and A1. The msleep below provides plenty
4301 * of margin for write posting.
4303 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
4304 (CHIP_ID(bp) == CHIP_ID_5706_A1))
4307 	/* Reset takes approximately 30 usec */
4308 for (i = 0; i < 10; i++) {
4309 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4310 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4311 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4316 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4317 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4318 printk(KERN_ERR PFX "Chip reset did not complete\n");
4323 /* Make sure byte swapping is properly configured. */
4324 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
4325 if (val != 0x01020304) {
4326 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
4330 /* Wait for the firmware to finish its initialization. */
4331 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
4335 spin_lock_bh(&bp->phy_lock);
4336 old_port = bp->phy_port;
4337 bnx2_init_remote_phy(bp);
4338 if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4339 old_port != bp->phy_port)
4340 bnx2_set_default_remote_link(bp);
4341 spin_unlock_bh(&bp->phy_lock);
4343 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4344 		/* Adjust the voltage regulator two steps lower.  The default
4345 		 * value of this register is 0x0000000e. */
4346 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4348 /* Remove bad rbuf memory from the free pool. */
4349 rc = bnx2_alloc_bad_rbuf(bp);
4352 if (bp->flags & BNX2_FLAG_USING_MSIX)
4353 bnx2_setup_msix_tbl(bp);
4359 bnx2_init_chip(struct bnx2 *bp)
4364 /* Make sure the interrupt is not active. */
4365 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4367 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4368 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4370 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4372 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4373 DMA_READ_CHANS << 12 |
4374 DMA_WRITE_CHANS << 16;
4376 val |= (0x2 << 20) | (1 << 11);
4378 if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4381 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4382 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
4383 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4385 REG_WR(bp, BNX2_DMA_CONFIG, val);
4387 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4388 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4389 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4390 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4393 if (bp->flags & BNX2_FLAG_PCIX) {
4396 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4398 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4399 val16 & ~PCI_X_CMD_ERO);
4402 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4403 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4404 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4405 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4407 /* Initialize context mapping and zero out the quick contexts. The
4408 * context block must have already been enabled. */
4409 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4410 rc = bnx2_init_5709_context(bp);
4414 bnx2_init_context(bp);
4416 if ((rc = bnx2_init_cpus(bp)) != 0)
4419 bnx2_init_nvram(bp);
4421 bnx2_set_mac_addr(bp);
4423 val = REG_RD(bp, BNX2_MQ_CONFIG);
4424 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4425 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4426 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
4427 val |= BNX2_MQ_CONFIG_HALT_DIS;
4429 REG_WR(bp, BNX2_MQ_CONFIG, val);
4431 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4432 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4433 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4435 val = (BCM_PAGE_BITS - 8) << 24;
4436 REG_WR(bp, BNX2_RV2P_CONFIG, val);
4438 /* Configure page size. */
4439 val = REG_RD(bp, BNX2_TBDR_CONFIG);
4440 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4441 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4442 REG_WR(bp, BNX2_TBDR_CONFIG, val);
4444 val = bp->mac_addr[0] +
4445 (bp->mac_addr[1] << 8) +
4446 (bp->mac_addr[2] << 16) +
4448 (bp->mac_addr[4] << 8) +
4449 (bp->mac_addr[5] << 16);
4450 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4452 /* Program the MTU. Also include 4 bytes for CRC32. */
4453 val = bp->dev->mtu + ETH_HLEN + 4;
4454 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4455 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4456 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
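	/*
	 * Worked example of the value programmed just above: with the
	 * default 1500-byte MTU, val = 1500 + ETH_HLEN + 4 = 1518, which
	 * does not exceed MAX_ETHERNET_PACKET_SIZE + 4, so the jumbo
	 * enable bit stays clear; a 9000-byte MTU gives 9018 and sets
	 * BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA in addition to the size field.
	 */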
4458 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4459 bp->bnx2_napi[i].last_status_idx = 0;
4461 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4463 /* Set up how to generate a link change interrupt. */
4464 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4466 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4467 (u64) bp->status_blk_mapping & 0xffffffff);
4468 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4470 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4471 (u64) bp->stats_blk_mapping & 0xffffffff);
4472 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4473 (u64) bp->stats_blk_mapping >> 32);
4475 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4476 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4478 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4479 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4481 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4482 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4484 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4486 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4488 REG_WR(bp, BNX2_HC_COM_TICKS,
4489 (bp->com_ticks_int << 16) | bp->com_ticks);
4491 REG_WR(bp, BNX2_HC_CMD_TICKS,
4492 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4494 if (CHIP_NUM(bp) == CHIP_NUM_5708)
4495 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4497 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4498 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4500 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4501 val = BNX2_HC_CONFIG_COLLECT_STATS;
4503 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4504 BNX2_HC_CONFIG_COLLECT_STATS;
4507 if (bp->flags & BNX2_FLAG_USING_MSIX) {
4508 u32 base = ((BNX2_TX_VEC - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4509 BNX2_HC_SB_CONFIG_1;
4511 REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
4512 BNX2_HC_MSIX_BIT_VECTOR_VAL);
4515 BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
4516 BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4518 REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
4519 (bp->tx_quick_cons_trip_int << 16) |
4520 bp->tx_quick_cons_trip);
4522 REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
4523 (bp->tx_ticks_int << 16) | bp->tx_ticks);
4525 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
4528 if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
4529 val |= BNX2_HC_CONFIG_ONE_SHOT;
4531 REG_WR(bp, BNX2_HC_CONFIG, val);
4533 /* Clear internal stats counters. */
4534 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4536 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
4538 /* Initialize the receive filter. */
4539 bnx2_set_rx_mode(bp->dev);
4541 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4542 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4543 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4544 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4546 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
4549 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
4550 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
4554 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
4560 bnx2_clear_ring_states(struct bnx2 *bp)
4562 struct bnx2_napi *bnapi;
4565 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
4566 bnapi = &bp->bnx2_napi[i];
4569 bnapi->hw_tx_cons = 0;
4570 bnapi->rx_prod_bseq = 0;
4573 bnapi->rx_pg_prod = 0;
4574 bnapi->rx_pg_cons = 0;
4579 bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
4581 u32 val, offset0, offset1, offset2, offset3;
4582 u32 cid_addr = GET_CID_ADDR(cid);
4584 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4585 offset0 = BNX2_L2CTX_TYPE_XI;
4586 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4587 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4588 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4590 offset0 = BNX2_L2CTX_TYPE;
4591 offset1 = BNX2_L2CTX_CMD_TYPE;
4592 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4593 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4595 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4596 bnx2_ctx_wr(bp, cid_addr, offset0, val);
4598 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4599 bnx2_ctx_wr(bp, cid_addr, offset1, val);
4601 val = (u64) bp->tx_desc_mapping >> 32;
4602 bnx2_ctx_wr(bp, cid_addr, offset2, val);
4604 val = (u64) bp->tx_desc_mapping & 0xffffffff;
4605 bnx2_ctx_wr(bp, cid_addr, offset3, val);
4609 bnx2_init_tx_ring(struct bnx2 *bp)
4613 struct bnx2_napi *bnapi;
4616 if (bp->flags & BNX2_FLAG_USING_MSIX) {
4618 bp->tx_vec = BNX2_TX_VEC;
4619 REG_WR(bp, BNX2_TSCH_TSS_CFG, BNX2_TX_INT_NUM |
4622 bnapi = &bp->bnx2_napi[bp->tx_vec];
4624 bp->tx_wake_thresh = bp->tx_ring_size / 2;
4626 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
4628 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
4629 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
4632 bp->tx_prod_bseq = 0;
4634 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4635 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
4637 bnx2_init_tx_context(bp, cid);
4641 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4647 for (i = 0; i < num_rings; i++) {
4650 rxbd = &rx_ring[i][0];
4651 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4652 rxbd->rx_bd_len = buf_size;
4653 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4655 if (i == (num_rings - 1))
4659 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4660 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
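
/*
 * Sketch of the chaining set up by bnx2_init_rxbd_rings() above (unused):
 * the last buffer descriptor of every ring page is not a data descriptor
 * but a pointer to the DMA address of the next page, and the final page
 * points back to page 0 so the chip sees one circular chain.
 */
static inline int bnx2_next_ring_page(int i, int num_rings)
{
	return (i == num_rings - 1) ? 0 : i + 1;
}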
4665 bnx2_init_rx_ring(struct bnx2 *bp)
4668 u16 prod, ring_prod;
4669 u32 val, rx_cid_addr = GET_CID_ADDR(RX_CID);
4670 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
4672 bnx2_init_rxbd_rings(bp->rx_desc_ring, bp->rx_desc_mapping,
4673 bp->rx_buf_use_size, bp->rx_max_ring);
4675 bnx2_init_rx_context0(bp);
4677 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4678 val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
4679 REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
4682 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
4683 if (bp->rx_pg_ring_size) {
4684 bnx2_init_rxbd_rings(bp->rx_pg_desc_ring,
4685 bp->rx_pg_desc_mapping,
4686 PAGE_SIZE, bp->rx_max_pg_ring);
4687 val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
4688 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
4689 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
4690 BNX2_L2CTX_RBDC_JUMBO_KEY);
4692 val = (u64) bp->rx_pg_desc_mapping[0] >> 32;
4693 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
4695 val = (u64) bp->rx_pg_desc_mapping[0] & 0xffffffff;
4696 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
4698 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4699 REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
4702 val = (u64) bp->rx_desc_mapping[0] >> 32;
4703 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
4705 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
4706 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
4708 ring_prod = prod = bnapi->rx_pg_prod;
4709 for (i = 0; i < bp->rx_pg_ring_size; i++) {
4710 if (bnx2_alloc_rx_page(bp, ring_prod) < 0)
4712 prod = NEXT_RX_BD(prod);
4713 ring_prod = RX_PG_RING_IDX(prod);
4715 bnapi->rx_pg_prod = prod;
4717 ring_prod = prod = bnapi->rx_prod;
4718 for (i = 0; i < bp->rx_ring_size; i++) {
4719 if (bnx2_alloc_rx_skb(bp, bnapi, ring_prod) < 0) {
4722 prod = NEXT_RX_BD(prod);
4723 ring_prod = RX_RING_IDX(prod);
4725 bnapi->rx_prod = prod;
4727 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
4729 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
4731 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bnapi->rx_prod_bseq);
4734 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
4736 u32 max, num_rings = 1;
4738 while (ring_size > MAX_RX_DESC_CNT) {
4739 ring_size -= MAX_RX_DESC_CNT;
4742 /* round to next power of 2 */
4744 while ((max & num_rings) == 0)
4747 if (num_rings != max)
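
/*
 * Worked example for bnx2_find_max_ring() above, assuming 256
 * descriptors per 4 KiB ring page with the last slot used as the chain
 * pointer (i.e. MAX_RX_DESC_CNT == 255): a requested ring_size of 1020
 * needs exactly 4 pages, which is already a power of two and is kept;
 * 1300 entries need 6 pages, which the masked-shift loop rounds up to 8
 * (subject to the max_size cap).  The hypothetical helper below is an
 * equivalent, simpler formulation of that rounding, ignoring the cap.
 */
static inline u32 bnx2_roundup_pow_of_two(u32 n)
{
	u32 p = 1;

	while (p < n)
		p <<= 1;
	return p;
}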
4754 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4756 u32 rx_size, rx_space, jumbo_size;
4758 /* 8 for CRC and VLAN */
4759 rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
4761 rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
4762 sizeof(struct skb_shared_info);
4764 bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
4765 bp->rx_pg_ring_size = 0;
4766 bp->rx_max_pg_ring = 0;
4767 bp->rx_max_pg_ring_idx = 0;
4768 if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
4769 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
4771 jumbo_size = size * pages;
4772 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
4773 jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
4775 bp->rx_pg_ring_size = jumbo_size;
4776 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
4778 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
4779 rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
4780 bp->rx_copy_thresh = 0;
4783 bp->rx_buf_use_size = rx_size;
4785 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
4786 bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
4787 bp->rx_ring_size = size;
4788 bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
4789 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
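
/*
 * Worked example for bnx2_set_rx_ring_size() above, assuming 4 KiB pages
 * and a 9000-byte MTU: the full rx_size (MTU + ETH_HLEN + BNX2_RX_OFFSET
 * + 8) plus alignment and skb_shared_info overhead no longer fits in one
 * page, so the paged ("jumbo") path is taken.  Each packet then needs
 * pages = PAGE_ALIGN(9000 - 40) >> PAGE_SHIFT = 3 page descriptors, the
 * page ring is sized to 3 * size (capped at MAX_TOTAL_RX_PG_DESC_CNT),
 * the linear buffer shrinks to BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET, and
 * oversized frames are split between that linear buffer and page
 * fragments as done in bnx2_rx_skb() above.
 */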
4793 bnx2_free_tx_skbs(struct bnx2 *bp)
4797 if (bp->tx_buf_ring == NULL)
4800 for (i = 0; i < TX_DESC_CNT; ) {
4801 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
4802 struct sk_buff *skb = tx_buf->skb;
4810 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
4811 skb_headlen(skb), PCI_DMA_TODEVICE);
4815 last = skb_shinfo(skb)->nr_frags;
4816 for (j = 0; j < last; j++) {
4817 tx_buf = &bp->tx_buf_ring[i + j + 1];
4818 pci_unmap_page(bp->pdev,
4819 pci_unmap_addr(tx_buf, mapping),
4820 skb_shinfo(skb)->frags[j].size,
4830 bnx2_free_rx_skbs(struct bnx2 *bp)
4834 if (bp->rx_buf_ring == NULL)
4837 for (i = 0; i < bp->rx_max_ring_idx; i++) {
4838 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
4839 struct sk_buff *skb = rx_buf->skb;
4844 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4845 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4851 for (i = 0; i < bp->rx_max_pg_ring_idx; i++)
4852 bnx2_free_rx_page(bp, i);
4856 bnx2_free_skbs(struct bnx2 *bp)
4858 bnx2_free_tx_skbs(bp);
4859 bnx2_free_rx_skbs(bp);
4863 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4867 rc = bnx2_reset_chip(bp, reset_code);
4872 if ((rc = bnx2_init_chip(bp)) != 0)
4875 bnx2_clear_ring_states(bp);
4876 bnx2_init_tx_ring(bp);
4877 bnx2_init_rx_ring(bp);
4882 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
4886 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4889 spin_lock_bh(&bp->phy_lock);
4890 bnx2_init_phy(bp, reset_phy);
4892 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
4893 bnx2_remote_phy_event(bp);
4894 spin_unlock_bh(&bp->phy_lock);
4899 bnx2_test_registers(struct bnx2 *bp)
4903 static const struct {
4906 #define BNX2_FL_NOT_5709 1
4910 { 0x006c, 0, 0x00000000, 0x0000003f },
4911 { 0x0090, 0, 0xffffffff, 0x00000000 },
4912 { 0x0094, 0, 0x00000000, 0x00000000 },
4914 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
4915 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4916 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4917 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
4918 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
4919 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4920 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
4921 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4922 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4924 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4925 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4926 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4927 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4928 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4929 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4931 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4932 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
4933 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
4935 { 0x1000, 0, 0x00000000, 0x00000001 },
4936 { 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
4938 { 0x1408, 0, 0x01c00800, 0x00000000 },
4939 { 0x149c, 0, 0x8000ffff, 0x00000000 },
4940 { 0x14a8, 0, 0x00000000, 0x000001ff },
4941 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
4942 { 0x14b0, 0, 0x00000002, 0x00000001 },
4943 { 0x14b8, 0, 0x00000000, 0x00000000 },
4944 { 0x14c0, 0, 0x00000000, 0x00000009 },
4945 { 0x14c4, 0, 0x00003fff, 0x00000000 },
4946 { 0x14cc, 0, 0x00000000, 0x00000001 },
4947 { 0x14d0, 0, 0xffffffff, 0x00000000 },
4949 { 0x1800, 0, 0x00000000, 0x00000001 },
4950 { 0x1804, 0, 0x00000000, 0x00000003 },
4952 { 0x2800, 0, 0x00000000, 0x00000001 },
4953 { 0x2804, 0, 0x00000000, 0x00003f01 },
4954 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
4955 { 0x2810, 0, 0xffff0000, 0x00000000 },
4956 { 0x2814, 0, 0xffff0000, 0x00000000 },
4957 { 0x2818, 0, 0xffff0000, 0x00000000 },
4958 { 0x281c, 0, 0xffff0000, 0x00000000 },
4959 { 0x2834, 0, 0xffffffff, 0x00000000 },
4960 { 0x2840, 0, 0x00000000, 0xffffffff },
4961 { 0x2844, 0, 0x00000000, 0xffffffff },
4962 { 0x2848, 0, 0xffffffff, 0x00000000 },
4963 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
4965 { 0x2c00, 0, 0x00000000, 0x00000011 },
4966 { 0x2c04, 0, 0x00000000, 0x00030007 },
4968 { 0x3c00, 0, 0x00000000, 0x00000001 },
4969 { 0x3c04, 0, 0x00000000, 0x00070000 },
4970 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
4971 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
4972 { 0x3c10, 0, 0xffffffff, 0x00000000 },
4973 { 0x3c14, 0, 0x00000000, 0xffffffff },
4974 { 0x3c18, 0, 0x00000000, 0xffffffff },
4975 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
4976 { 0x3c20, 0, 0xffffff00, 0x00000000 },
4978 { 0x5004, 0, 0x00000000, 0x0000007f },
4979 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
4981 { 0x5c00, 0, 0x00000000, 0x00000001 },
4982 { 0x5c04, 0, 0x00000000, 0x0003000f },
4983 { 0x5c08, 0, 0x00000003, 0x00000000 },
4984 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
4985 { 0x5c10, 0, 0x00000000, 0xffffffff },
4986 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
4987 { 0x5c84, 0, 0x00000000, 0x0000f333 },
4988 { 0x5c88, 0, 0x00000000, 0x00077373 },
4989 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
4991 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
4992 { 0x680c, 0, 0xffffffff, 0x00000000 },
4993 { 0x6810, 0, 0xffffffff, 0x00000000 },
4994 { 0x6814, 0, 0xffffffff, 0x00000000 },
4995 { 0x6818, 0, 0xffffffff, 0x00000000 },
4996 { 0x681c, 0, 0xffffffff, 0x00000000 },
4997 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
4998 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
4999 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
5000 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
5001 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
5002 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
5003 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
5004 { 0x683c, 0, 0x0000ffff, 0x00000000 },
5005 { 0x6840, 0, 0x00000ff0, 0x00000000 },
5006 { 0x6844, 0, 0x00ffff00, 0x00000000 },
5007 { 0x684c, 0, 0xffffffff, 0x00000000 },
5008 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5009 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5010 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5011 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5012 { 0x6908, 0, 0x00000000, 0x0001ff0f },
5013 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5015 { 0xffff, 0, 0x00000000, 0x00000000 },
5020 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5023 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5024 u32 offset, rw_mask, ro_mask, save_val, val;
5025 u16 flags = reg_tbl[i].flags;
5027 if (is_5709 && (flags & BNX2_FL_NOT_5709))
5030 offset = (u32) reg_tbl[i].offset;
5031 rw_mask = reg_tbl[i].rw_mask;
5032 ro_mask = reg_tbl[i].ro_mask;
5034 save_val = readl(bp->regview + offset);
5036 writel(0, bp->regview + offset);
5038 val = readl(bp->regview + offset);
5039 if ((val & rw_mask) != 0) {
5043 if ((val & ro_mask) != (save_val & ro_mask)) {
5047 writel(0xffffffff, bp->regview + offset);
5049 val = readl(bp->regview + offset);
5050 if ((val & rw_mask) != rw_mask) {
5054 if ((val & ro_mask) != (save_val & ro_mask)) {
5058 writel(save_val, bp->regview + offset);
5062 writel(save_val, bp->regview + offset);
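/*
 * Each register is probed twice: write all-zeros, then all-ones, and after
 * each write verify that the writable bits (rw_mask) took the new value
 * while the read-only bits (ro_mask) still hold what was saved beforehand.
 * A condensed sketch of one probe (hypothetical helper, not driver code):
 */
#if 0
static int reg_probe(void __iomem *reg, u32 rw_mask, u32 ro_mask)
{
	u32 save = readl(reg), val;
	int bad = 0;

	writel(0, reg);
	val = readl(reg);
	bad |= (val & rw_mask) != 0;			/* writable bits must clear */
	bad |= (val & ro_mask) != (save & ro_mask);	/* read-only bits must hold */

	writel(0xffffffff, reg);
	val = readl(reg);
	bad |= (val & rw_mask) != rw_mask;		/* writable bits must set */
	bad |= (val & ro_mask) != (save & ro_mask);

	writel(save, reg);				/* always restore */
	return bad;
}
#endif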
5070 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5072 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5073 0xaaaaaaaa, 0xaa55aa55, 0x55aa55aa };
5076 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5079 for (offset = 0; offset < size; offset += 4) {
5081 bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5083 if (bnx2_reg_rd_ind(bp, start + offset) !=
5093 bnx2_test_memory(struct bnx2 *bp)
5097 static struct mem_entry {
5100 } mem_tbl_5706[] = {
5101 { 0x60000, 0x4000 },
5102 { 0xa0000, 0x3000 },
5103 { 0xe0000, 0x4000 },
5104 { 0x120000, 0x4000 },
5105 { 0x1a0000, 0x4000 },
5106 { 0x160000, 0x4000 },
5110 { 0x60000, 0x4000 },
5111 { 0xa0000, 0x3000 },
5112 { 0xe0000, 0x4000 },
5113 { 0x120000, 0x4000 },
5114 { 0x1a0000, 0x4000 },
5117 struct mem_entry *mem_tbl;
5119 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5120 mem_tbl = mem_tbl_5709;
5122 mem_tbl = mem_tbl_5706;
5124 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5125 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5126 mem_tbl[i].len)) != 0) {
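/*
 * bnx2_do_mem_test() walks each window in mem_tbl[] with the patterns above
 * (all-zeros, all-ones, alternating bits) and bails out on the first word
 * that does not read back what was just written.  The core of the walk, as
 * a sketch:
 */
#if 0
for (i = 0; i < ARRAY_SIZE(test_pattern); i++)
	for (offset = 0; offset < size; offset += 4) {
		bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
		if (bnx2_reg_rd_ind(bp, start + offset) != test_pattern[i])
			return -ENODEV;	/* error code illustrative */
	}
#endif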
5134 #define BNX2_MAC_LOOPBACK 0
5135 #define BNX2_PHY_LOOPBACK 1
5138 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5140 unsigned int pkt_size, num_pkts, i;
5141 struct sk_buff *skb, *rx_skb;
5142 unsigned char *packet;
5143 u16 rx_start_idx, rx_idx;
5146 struct sw_bd *rx_buf;
5147 struct l2_fhdr *rx_hdr;
5149 struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5152 if (bp->flags & BNX2_FLAG_USING_MSIX)
5153 tx_napi = &bp->bnx2_napi[BNX2_TX_VEC];
5155 if (loopback_mode == BNX2_MAC_LOOPBACK) {
5156 bp->loopback = MAC_LOOPBACK;
5157 bnx2_set_mac_loopback(bp);
5159 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5160 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5163 bp->loopback = PHY_LOOPBACK;
5164 bnx2_set_phy_loopback(bp);
5169 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5170 skb = netdev_alloc_skb(bp->dev, pkt_size);
5173 packet = skb_put(skb, pkt_size);
5174 memcpy(packet, bp->dev->dev_addr, 6);
5175 memset(packet + 6, 0x0, 8);
5176 for (i = 14; i < pkt_size; i++)
5177 packet[i] = (unsigned char) (i & 0xff);
5179 map = pci_map_single(bp->pdev, skb->data, pkt_size,
5182 REG_WR(bp, BNX2_HC_COMMAND,
5183 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5185 REG_RD(bp, BNX2_HC_COMMAND);
5188 rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5192 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
5194 txbd->tx_bd_haddr_hi = (u64) map >> 32;
5195 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5196 txbd->tx_bd_mss_nbytes = pkt_size;
5197 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5200 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
5201 bp->tx_prod_bseq += pkt_size;
5203 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
5204 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
5208 REG_WR(bp, BNX2_HC_COMMAND,
5209 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5211 REG_RD(bp, BNX2_HC_COMMAND);
5215 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
5218 if (bnx2_get_hw_tx_cons(tx_napi) != bp->tx_prod)
5219 goto loopback_test_done;
5221 rx_idx = bnx2_get_hw_rx_cons(bnapi);
5222 if (rx_idx != rx_start_idx + num_pkts) {
5223 goto loopback_test_done;
5226 rx_buf = &bp->rx_buf_ring[rx_start_idx];
5227 rx_skb = rx_buf->skb;
5229 rx_hdr = (struct l2_fhdr *) rx_skb->data;
5230 skb_reserve(rx_skb, BNX2_RX_OFFSET);
5232 pci_dma_sync_single_for_cpu(bp->pdev,
5233 pci_unmap_addr(rx_buf, mapping),
5234 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
5236 if (rx_hdr->l2_fhdr_status &
5237 (L2_FHDR_ERRORS_BAD_CRC |
5238 L2_FHDR_ERRORS_PHY_DECODE |
5239 L2_FHDR_ERRORS_ALIGNMENT |
5240 L2_FHDR_ERRORS_TOO_SHORT |
5241 L2_FHDR_ERRORS_GIANT_FRAME)) {
5243 goto loopback_test_done;
5246 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5247 goto loopback_test_done;
5250 for (i = 14; i < pkt_size; i++) {
5251 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
5252 goto loopback_test_done;
5263 #define BNX2_MAC_LOOPBACK_FAILED 1
5264 #define BNX2_PHY_LOOPBACK_FAILED 2
5265 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
5266 BNX2_PHY_LOOPBACK_FAILED)
5269 bnx2_test_loopback(struct bnx2 *bp)
5273 if (!netif_running(bp->dev))
5274 return BNX2_LOOPBACK_FAILED;
5276 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5277 spin_lock_bh(&bp->phy_lock);
5278 bnx2_init_phy(bp, 1);
5279 spin_unlock_bh(&bp->phy_lock);
5280 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5281 rc |= BNX2_MAC_LOOPBACK_FAILED;
5282 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5283 rc |= BNX2_PHY_LOOPBACK_FAILED;
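/*
 * The loopback self-test exercises the same transmit/receive path twice,
 * first with the MAC looped back internally and then with the PHY looped
 * back, and ORs the per-mode failure bits so ethtool can report which
 * stage broke.  Reading the result (illustrative):
 */
#if 0
u32 res = bnx2_test_loopback(bp);
if (res & BNX2_MAC_LOOPBACK_FAILED)
	;	/* the frame never made it through the MAC datapath */
if (res & BNX2_PHY_LOOPBACK_FAILED)
	;	/* MAC path is fine; the PHY/SerDes stage failed */
#endif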
5287 #define NVRAM_SIZE 0x200
5288 #define CRC32_RESIDUAL 0xdebb20e3
5291 bnx2_test_nvram(struct bnx2 *bp)
5293 __be32 buf[NVRAM_SIZE / 4];
5294 u8 *data = (u8 *) buf;
5298 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5299 goto test_nvram_done;
5301 magic = be32_to_cpu(buf[0]);
5302 if (magic != 0x669955aa) {
5304 goto test_nvram_done;
5307 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5308 goto test_nvram_done;
5310 csum = ether_crc_le(0x100, data);
5311 if (csum != CRC32_RESIDUAL) {
5313 goto test_nvram_done;
5316 csum = ether_crc_le(0x100, data + 0x100);
5317 if (csum != CRC32_RESIDUAL) {
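/*
 * CRC32_RESIDUAL relies on a property of CRC-32: running the CRC over a
 * block that already ends with its own stored CRC always yields the same
 * constant, 0xdebb20e3.  Each 256-byte NVRAM region is therefore validated
 * with one ether_crc_le() pass over data plus trailing checksum.  A sketch,
 * with "region" standing in for a pointer to one such block:
 */
#if 0
if (ether_crc_le(0x100, region) != CRC32_RESIDUAL)
	;	/* region contents or its stored CRC are corrupt */
#endif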
5326 bnx2_test_link(struct bnx2 *bp)
5330 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5335 spin_lock_bh(&bp->phy_lock);
5336 bnx2_enable_bmsr1(bp);
5337 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5338 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5339 bnx2_disable_bmsr1(bp);
5340 spin_unlock_bh(&bp->phy_lock);
5342 if (bmsr & BMSR_LSTATUS) {
5349 bnx2_test_intr(struct bnx2 *bp)
5354 if (!netif_running(bp->dev))
5357 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5359 /* This register is not touched during run-time. */
5360 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5361 REG_RD(bp, BNX2_HC_COMMAND);
5363 for (i = 0; i < 10; i++) {
5364 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5370 msleep_interruptible(10);
5378 /* Determining link for parallel detection. */
5380 bnx2_5706_serdes_has_link(struct bnx2 *bp)
5382 u32 mode_ctl, an_dbg, exp;
5384 if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
5387 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
5388 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
5390 if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
5393 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5394 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5395 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5397 if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
5400 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
5401 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5402 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5404 if (exp & MII_EXPAND_REG1_RUDI_C) /* receiving CONFIG */
5411 bnx2_5706_serdes_timer(struct bnx2 *bp)
5415 spin_lock(&bp->phy_lock);
5416 if (bp->serdes_an_pending) {
5417 bp->serdes_an_pending--;
5419 } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5422 bp->current_interval = bp->timer_interval;
5424 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5426 if (bmcr & BMCR_ANENABLE) {
5427 if (bnx2_5706_serdes_has_link(bp)) {
5428 bmcr &= ~BMCR_ANENABLE;
5429 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5430 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5431 bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
5435 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
5436 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
5439 bnx2_write_phy(bp, 0x17, 0x0f01);
5440 bnx2_read_phy(bp, 0x15, &phy2);
5444 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5445 bmcr |= BMCR_ANENABLE;
5446 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5448 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
5451 bp->current_interval = bp->timer_interval;
5456 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5457 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
5458 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
5460 if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
5461 if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
5462 bnx2_5706s_force_link_dn(bp, 1);
5463 bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
5466 } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
5469 spin_unlock(&bp->phy_lock);
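/*
 * In short: while autonegotiation on the 5706 SerDes gets no response,
 * parallel detection (signal present, no sync/RUDI errors, no CONFIG
 * codewords) lets the driver force 1000/full and set PARALLEL_DETECT.  If
 * the partner later starts autonegotiating (the 0x17/0x15 register probe
 * above), autoneg is re-enabled and the flag cleared.  Loss of sync while
 * the link is up forces the link down so the failure becomes visible
 * instead of silently dropping traffic.
 */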
5473 bnx2_5708_serdes_timer(struct bnx2 *bp)
5475 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5478 if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
5479 bp->serdes_an_pending = 0;
5483 spin_lock(&bp->phy_lock);
5484 if (bp->serdes_an_pending)
5485 bp->serdes_an_pending--;
5486 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5489 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5490 if (bmcr & BMCR_ANENABLE) {
5491 bnx2_enable_forced_2g5(bp);
5492 bp->current_interval = SERDES_FORCED_TIMEOUT;
5494 bnx2_disable_forced_2g5(bp);
5495 bp->serdes_an_pending = 2;
5496 bp->current_interval = bp->timer_interval;
5500 bp->current_interval = bp->timer_interval;
5502 spin_unlock(&bp->phy_lock);
5506 bnx2_timer(unsigned long data)
5508 struct bnx2 *bp = (struct bnx2 *) data;
5510 if (!netif_running(bp->dev))
5513 if (atomic_read(&bp->intr_sem) != 0)
5514 goto bnx2_restart_timer;
5516 bnx2_send_heart_beat(bp);
5518 bp->stats_blk->stat_FwRxDrop =
5519 bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
5521 /* work around occasionally corrupted counters */
5522 if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
5523 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
5524 BNX2_HC_COMMAND_STATS_NOW);
5526 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
5527 if (CHIP_NUM(bp) == CHIP_NUM_5706)
5528 bnx2_5706_serdes_timer(bp);
5530 bnx2_5708_serdes_timer(bp);
5534 mod_timer(&bp->timer, jiffies + bp->current_interval);
5538 bnx2_request_irq(struct bnx2 *bp)
5540 struct net_device *dev = bp->dev;
5541 unsigned long flags;
5542 struct bnx2_irq *irq;
5545 if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
5548 flags = IRQF_SHARED;
5550 for (i = 0; i < bp->irq_nvecs; i++) {
5551 irq = &bp->irq_tbl[i];
5552 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
5562 bnx2_free_irq(struct bnx2 *bp)
5564 struct net_device *dev = bp->dev;
5565 struct bnx2_irq *irq;
5568 for (i = 0; i < bp->irq_nvecs; i++) {
5569 irq = &bp->irq_tbl[i];
5571 free_irq(irq->vector, dev);
5574 if (bp->flags & BNX2_FLAG_USING_MSI)
5575 pci_disable_msi(bp->pdev);
5576 else if (bp->flags & BNX2_FLAG_USING_MSIX)
5577 pci_disable_msix(bp->pdev);
5579 bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
5583 bnx2_enable_msix(struct bnx2 *bp)
5586 struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
5588 bnx2_setup_msix_tbl(bp);
5589 REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
5590 REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
5591 REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
5593 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5594 msix_ent[i].entry = i;
5595 msix_ent[i].vector = 0;
5598 rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
5602 bp->irq_tbl[BNX2_BASE_VEC].handler = bnx2_msi_1shot;
5603 bp->irq_tbl[BNX2_TX_VEC].handler = bnx2_tx_msix;
5605 strcpy(bp->irq_tbl[BNX2_BASE_VEC].name, bp->dev->name);
5606 strcat(bp->irq_tbl[BNX2_BASE_VEC].name, "-base");
5607 strcpy(bp->irq_tbl[BNX2_TX_VEC].name, bp->dev->name);
5608 strcat(bp->irq_tbl[BNX2_TX_VEC].name, "-tx");
5610 bp->irq_nvecs = BNX2_MAX_MSIX_VEC;
5611 bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
5612 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
5613 bp->irq_tbl[i].vector = msix_ent[i].vector;
5617 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
5619 bp->irq_tbl[0].handler = bnx2_interrupt;
5620 strcpy(bp->irq_tbl[0].name, bp->dev->name);
5622 bp->irq_tbl[0].vector = bp->pdev->irq;
5624 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
5625 bnx2_enable_msix(bp);
5627 if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
5628 !(bp->flags & BNX2_FLAG_USING_MSIX)) {
5629 if (pci_enable_msi(bp->pdev) == 0) {
5630 bp->flags |= BNX2_FLAG_USING_MSI;
5631 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5632 bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
5633 bp->irq_tbl[0].handler = bnx2_msi_1shot;
5635 bp->irq_tbl[0].handler = bnx2_msi;
5637 bp->irq_tbl[0].vector = bp->pdev->irq;
5642 /* Called with rtnl_lock */
5644 bnx2_open(struct net_device *dev)
5646 struct bnx2 *bp = netdev_priv(dev);
5649 netif_carrier_off(dev);
5651 bnx2_set_power_state(bp, PCI_D0);
5652 bnx2_disable_int(bp);
5654 rc = bnx2_alloc_mem(bp);
5658 bnx2_setup_int_mode(bp, disable_msi);
5659 bnx2_napi_enable(bp);
5660 rc = bnx2_request_irq(bp);
5663 bnx2_napi_disable(bp);
5668 rc = bnx2_init_nic(bp, 1);
5671 bnx2_napi_disable(bp);
5678 mod_timer(&bp->timer, jiffies + bp->current_interval);
5680 atomic_set(&bp->intr_sem, 0);
5682 bnx2_enable_int(bp);
5684 if (bp->flags & BNX2_FLAG_USING_MSI) {
5685 /* Test MSI to make sure it is working.
5686 * If the MSI test fails, go back to INTx mode.
5688 if (bnx2_test_intr(bp) != 0) {
5689 printk(KERN_WARNING PFX "%s: No interrupt was generated"
5690 " using MSI, switching to INTx mode. Please"
5691 " report this failure to the PCI maintainer"
5692 " and include system chipset information.\n",
5695 bnx2_disable_int(bp);
5698 bnx2_setup_int_mode(bp, 1);
5700 rc = bnx2_init_nic(bp, 0);
5703 rc = bnx2_request_irq(bp);
5706 bnx2_napi_disable(bp);
5709 del_timer_sync(&bp->timer);
5712 bnx2_enable_int(bp);
5715 if (bp->flags & BNX2_FLAG_USING_MSI)
5716 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
5717 else if (bp->flags & BNX2_FLAG_USING_MSIX)
5718 printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);
5720 netif_start_queue(dev);
5726 bnx2_reset_task(struct work_struct *work)
5728 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
5730 if (!netif_running(bp->dev))
5733 bp->in_reset_task = 1;
5734 bnx2_netif_stop(bp);
5736 bnx2_init_nic(bp, 1);
5738 atomic_set(&bp->intr_sem, 1);
5739 bnx2_netif_start(bp);
5740 bp->in_reset_task = 0;
5744 bnx2_tx_timeout(struct net_device *dev)
5746 struct bnx2 *bp = netdev_priv(dev);
5748 /* This allows the netif to be shut down gracefully before resetting */
5749 schedule_work(&bp->reset_task);
5753 /* Called with rtnl_lock */
5755 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
5757 struct bnx2 *bp = netdev_priv(dev);
5759 bnx2_netif_stop(bp);
5762 bnx2_set_rx_mode(dev);
5764 bnx2_netif_start(bp);
5768 /* Called with netif_tx_lock.
5769 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
5770 * netif_wake_queue().
5773 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
5775 struct bnx2 *bp = netdev_priv(dev);
5778 struct sw_bd *tx_buf;
5779 u32 len, vlan_tag_flags, last_frag, mss;
5780 u16 prod, ring_prod;
5782 struct bnx2_napi *bnapi = &bp->bnx2_napi[bp->tx_vec];
5784 if (unlikely(bnx2_tx_avail(bp, bnapi) <
5785 (skb_shinfo(skb)->nr_frags + 1))) {
5786 netif_stop_queue(dev);
5787 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
5790 return NETDEV_TX_BUSY;
5792 len = skb_headlen(skb);
5794 ring_prod = TX_RING_IDX(prod);
5797 if (skb->ip_summed == CHECKSUM_PARTIAL) {
5798 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
5801 if (bp->vlgrp && vlan_tx_tag_present(skb)) {
5803 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
5805 if ((mss = skb_shinfo(skb)->gso_size)) {
5806 u32 tcp_opt_len, ip_tcp_len;
5809 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
5811 tcp_opt_len = tcp_optlen(skb);
5813 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
5814 u32 tcp_off = skb_transport_offset(skb) -
5815 sizeof(struct ipv6hdr) - ETH_HLEN;
5817 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
5818 TX_BD_FLAGS_SW_FLAGS;
5819 if (likely(tcp_off == 0))
5820 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
5823 vlan_tag_flags |= ((tcp_off & 0x3) <<
5824 TX_BD_FLAGS_TCP6_OFF0_SHL) |
5825 ((tcp_off & 0x10) <<
5826 TX_BD_FLAGS_TCP6_OFF4_SHL);
5827 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
5830 if (skb_header_cloned(skb) &&
5831 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5833 return NETDEV_TX_OK;
5836 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5840 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5841 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5845 if (tcp_opt_len || (iph->ihl > 5)) {
5846 vlan_tag_flags |= ((iph->ihl - 5) +
5847 (tcp_opt_len >> 2)) << 8;
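/*
 * For LSO the stack hands down one oversized frame.  The driver rewrites
 * the IP total length to cover a single MSS worth of payload plus headers
 * and seeds the TCP checksum with the pseudo-header only; the chip then
 * generates the per-segment lengths and final checksums itself.  Extra
 * header words (IP options, TCP options) are folded into vlan_tag_flags as
 * a count of 32-bit words so the hardware knows where the payload starts.
 */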
5853 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
5855 tx_buf = &bp->tx_buf_ring[ring_prod];
5857 pci_unmap_addr_set(tx_buf, mapping, mapping);
5859 txbd = &bp->tx_desc_ring[ring_prod];
5861 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5862 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5863 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5864 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
5866 last_frag = skb_shinfo(skb)->nr_frags;
5868 for (i = 0; i < last_frag; i++) {
5869 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5871 prod = NEXT_TX_BD(prod);
5872 ring_prod = TX_RING_IDX(prod);
5873 txbd = &bp->tx_desc_ring[ring_prod];
5876 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
5877 len, PCI_DMA_TODEVICE);
5878 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
5881 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5882 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5883 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5884 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
5887 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
5889 prod = NEXT_TX_BD(prod);
5890 bp->tx_prod_bseq += skb->len;
5892 REG_WR16(bp, bp->tx_bidx_addr, prod);
5893 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
5898 dev->trans_start = jiffies;
5900 if (unlikely(bnx2_tx_avail(bp, bnapi) <= MAX_SKB_FRAGS)) {
5901 netif_stop_queue(dev);
5902 if (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh)
5903 netif_wake_queue(dev);
5906 return NETDEV_TX_OK;
5909 /* Called with rtnl_lock */
5911 bnx2_close(struct net_device *dev)
5913 struct bnx2 *bp = netdev_priv(dev);
5916 /* Calling flush_scheduled_work() may deadlock because
5917 * linkwatch_event() may be on the workqueue and it will try to get
5918 * the rtnl_lock which we are holding.
5920 while (bp->in_reset_task)
5923 bnx2_disable_int_sync(bp);
5924 bnx2_napi_disable(bp);
5925 del_timer_sync(&bp->timer);
5926 if (bp->flags & BNX2_FLAG_NO_WOL)
5927 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5929 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5931 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5932 bnx2_reset_chip(bp, reset_code);
5937 netif_carrier_off(bp->dev);
5938 bnx2_set_power_state(bp, PCI_D3hot);
5942 #define GET_NET_STATS64(ctr) \
5943 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
5944 (unsigned long) (ctr##_lo)
5946 #define GET_NET_STATS32(ctr) \
5949 #if (BITS_PER_LONG == 64)
5950 #define GET_NET_STATS GET_NET_STATS64
5952 #define GET_NET_STATS GET_NET_STATS32
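/*
 * The hardware exports every 64-bit counter as a _hi/_lo pair of 32-bit
 * words.  On 64-bit kernels GET_NET_STATS stitches the pair back together,
 * e.g. hi = 0x1, lo = 0x2 reads back as 0x100000002; on 32-bit kernels only
 * the low word fits in the unsigned long used by struct net_device_stats,
 * so the same counter reads back as just 0x2.
 */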
5955 static struct net_device_stats *
5956 bnx2_get_stats(struct net_device *dev)
5958 struct bnx2 *bp = netdev_priv(dev);
5959 struct statistics_block *stats_blk = bp->stats_blk;
5960 struct net_device_stats *net_stats = &bp->net_stats;
5962 if (bp->stats_blk == NULL) {
5965 net_stats->rx_packets =
5966 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
5967 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
5968 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
5970 net_stats->tx_packets =
5971 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
5972 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
5973 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
5975 net_stats->rx_bytes =
5976 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
5978 net_stats->tx_bytes =
5979 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
5981 net_stats->multicast =
5982 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
5984 net_stats->collisions =
5985 (unsigned long) stats_blk->stat_EtherStatsCollisions;
5987 net_stats->rx_length_errors =
5988 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
5989 stats_blk->stat_EtherStatsOverrsizePkts);
5991 net_stats->rx_over_errors =
5992 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
5994 net_stats->rx_frame_errors =
5995 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
5997 net_stats->rx_crc_errors =
5998 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
6000 net_stats->rx_errors = net_stats->rx_length_errors +
6001 net_stats->rx_over_errors + net_stats->rx_frame_errors +
6002 net_stats->rx_crc_errors;
6004 net_stats->tx_aborted_errors =
6005 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
6006 stats_blk->stat_Dot3StatsLateCollisions);
6008 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
6009 (CHIP_ID(bp) == CHIP_ID_5708_A0))
6010 net_stats->tx_carrier_errors = 0;
6012 net_stats->tx_carrier_errors =
6014 stats_blk->stat_Dot3StatsCarrierSenseErrors;
6017 net_stats->tx_errors =
6019 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
6021 net_stats->tx_aborted_errors +
6022 net_stats->tx_carrier_errors;
6024 net_stats->rx_missed_errors =
6025 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
6026 stats_blk->stat_FwRxDrop);
6031 /* All ethtool functions called with rtnl_lock */
6034 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6036 struct bnx2 *bp = netdev_priv(dev);
6037 int support_serdes = 0, support_copper = 0;
6039 cmd->supported = SUPPORTED_Autoneg;
6040 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6043 } else if (bp->phy_port == PORT_FIBRE)
6048 if (support_serdes) {
6049 cmd->supported |= SUPPORTED_1000baseT_Full |
6051 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6052 cmd->supported |= SUPPORTED_2500baseX_Full;
6055 if (support_copper) {
6056 cmd->supported |= SUPPORTED_10baseT_Half |
6057 SUPPORTED_10baseT_Full |
6058 SUPPORTED_100baseT_Half |
6059 SUPPORTED_100baseT_Full |
6060 SUPPORTED_1000baseT_Full |
6065 spin_lock_bh(&bp->phy_lock);
6066 cmd->port = bp->phy_port;
6067 cmd->advertising = bp->advertising;
6069 if (bp->autoneg & AUTONEG_SPEED) {
6070 cmd->autoneg = AUTONEG_ENABLE;
6073 cmd->autoneg = AUTONEG_DISABLE;
6076 if (netif_carrier_ok(dev)) {
6077 cmd->speed = bp->line_speed;
6078 cmd->duplex = bp->duplex;
6084 spin_unlock_bh(&bp->phy_lock);
6086 cmd->transceiver = XCVR_INTERNAL;
6087 cmd->phy_address = bp->phy_addr;
6093 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6095 struct bnx2 *bp = netdev_priv(dev);
6096 u8 autoneg = bp->autoneg;
6097 u8 req_duplex = bp->req_duplex;
6098 u16 req_line_speed = bp->req_line_speed;
6099 u32 advertising = bp->advertising;
6102 spin_lock_bh(&bp->phy_lock);
6104 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
6105 goto err_out_unlock;
6107 if (cmd->port != bp->phy_port &&
6108 !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
6109 goto err_out_unlock;
6111 if (cmd->autoneg == AUTONEG_ENABLE) {
6112 autoneg |= AUTONEG_SPEED;
6114 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
6116 /* allow advertising 1 speed */
6117 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
6118 (cmd->advertising == ADVERTISED_10baseT_Full) ||
6119 (cmd->advertising == ADVERTISED_100baseT_Half) ||
6120 (cmd->advertising == ADVERTISED_100baseT_Full)) {
6122 if (cmd->port == PORT_FIBRE)
6123 goto err_out_unlock;
6125 advertising = cmd->advertising;
6127 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
6128 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ||
6129 (cmd->port == PORT_TP))
6130 goto err_out_unlock;
6131 } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
6132 advertising = cmd->advertising;
6133 else if (cmd->advertising == ADVERTISED_1000baseT_Half)
6134 goto err_out_unlock;
6136 if (cmd->port == PORT_FIBRE)
6137 advertising = ETHTOOL_ALL_FIBRE_SPEED;
6139 advertising = ETHTOOL_ALL_COPPER_SPEED;
6141 advertising |= ADVERTISED_Autoneg;
6144 if (cmd->port == PORT_FIBRE) {
6145 if ((cmd->speed != SPEED_1000 &&
6146 cmd->speed != SPEED_2500) ||
6147 (cmd->duplex != DUPLEX_FULL))
6148 goto err_out_unlock;
6150 if (cmd->speed == SPEED_2500 &&
6151 !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
6152 goto err_out_unlock;
6154 else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
6155 goto err_out_unlock;
6157 autoneg &= ~AUTONEG_SPEED;
6158 req_line_speed = cmd->speed;
6159 req_duplex = cmd->duplex;
6163 bp->autoneg = autoneg;
6164 bp->advertising = advertising;
6165 bp->req_line_speed = req_line_speed;
6166 bp->req_duplex = req_duplex;
6168 err = bnx2_setup_phy(bp, cmd->port);
6171 spin_unlock_bh(&bp->phy_lock);
6177 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6179 struct bnx2 *bp = netdev_priv(dev);
6181 strcpy(info->driver, DRV_MODULE_NAME);
6182 strcpy(info->version, DRV_MODULE_VERSION);
6183 strcpy(info->bus_info, pci_name(bp->pdev));
6184 strcpy(info->fw_version, bp->fw_version);
6187 #define BNX2_REGDUMP_LEN (32 * 1024)
6190 bnx2_get_regs_len(struct net_device *dev)
6192 return BNX2_REGDUMP_LEN;
6196 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6198 u32 *p = _p, i, offset;
6200 struct bnx2 *bp = netdev_priv(dev);
6201 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6202 0x0800, 0x0880, 0x0c00, 0x0c10,
6203 0x0c30, 0x0d08, 0x1000, 0x101c,
6204 0x1040, 0x1048, 0x1080, 0x10a4,
6205 0x1400, 0x1490, 0x1498, 0x14f0,
6206 0x1500, 0x155c, 0x1580, 0x15dc,
6207 0x1600, 0x1658, 0x1680, 0x16d8,
6208 0x1800, 0x1820, 0x1840, 0x1854,
6209 0x1880, 0x1894, 0x1900, 0x1984,
6210 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6211 0x1c80, 0x1c94, 0x1d00, 0x1d84,
6212 0x2000, 0x2030, 0x23c0, 0x2400,
6213 0x2800, 0x2820, 0x2830, 0x2850,
6214 0x2b40, 0x2c10, 0x2fc0, 0x3058,
6215 0x3c00, 0x3c94, 0x4000, 0x4010,
6216 0x4080, 0x4090, 0x43c0, 0x4458,
6217 0x4c00, 0x4c18, 0x4c40, 0x4c54,
6218 0x4fc0, 0x5010, 0x53c0, 0x5444,
6219 0x5c00, 0x5c18, 0x5c80, 0x5c90,
6220 0x5fc0, 0x6000, 0x6400, 0x6428,
6221 0x6800, 0x6848, 0x684c, 0x6860,
6222 0x6888, 0x6910, 0x8000 };
6226 memset(p, 0, BNX2_REGDUMP_LEN);
6228 if (!netif_running(bp->dev))
6232 offset = reg_boundaries[0];
6234 while (offset < BNX2_REGDUMP_LEN) {
6235 *p++ = REG_RD(bp, offset);
6237 if (offset == reg_boundaries[i + 1]) {
6238 offset = reg_boundaries[i + 2];
6239 p = (u32 *) (orig_p + offset);
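/*
 * reg_boundaries[] lists alternating "dump up to"/"resume at" offsets, so
 * only documented register ranges are read while the holes between them are
 * left as the zeros from the initial memset.  The destination pointer is
 * re-based after every jump so each register still lands at its natural
 * offset inside the 32 KB ethtool buffer.  Shape of the loop (simplified
 * sketch; the increments are assumed):
 */
#if 0
while (offset < BNX2_REGDUMP_LEN) {
	*p++ = REG_RD(bp, offset);
	offset += 4;
	if (offset == reg_boundaries[i + 1]) {	/* end of a live range */
		offset = reg_boundaries[i + 2];	/* skip to the next one */
		p = (u32 *) (orig_p + offset);
		i += 2;
	}
}
#endif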
6246 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6248 struct bnx2 *bp = netdev_priv(dev);
6250 if (bp->flags & BNX2_FLAG_NO_WOL) {
6255 wol->supported = WAKE_MAGIC;
6257 wol->wolopts = WAKE_MAGIC;
6261 memset(&wol->sopass, 0, sizeof(wol->sopass));
6265 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6267 struct bnx2 *bp = netdev_priv(dev);
6269 if (wol->wolopts & ~WAKE_MAGIC)
6272 if (wol->wolopts & WAKE_MAGIC) {
6273 if (bp->flags & BNX2_FLAG_NO_WOL)
6285 bnx2_nway_reset(struct net_device *dev)
6287 struct bnx2 *bp = netdev_priv(dev);
6290 if (!(bp->autoneg & AUTONEG_SPEED)) {
6294 spin_lock_bh(&bp->phy_lock);
6296 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6299 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
6300 spin_unlock_bh(&bp->phy_lock);
6304 /* Force a link down visible on the other side */
6305 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6306 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
6307 spin_unlock_bh(&bp->phy_lock);
6311 spin_lock_bh(&bp->phy_lock);
6313 bp->current_interval = SERDES_AN_TIMEOUT;
6314 bp->serdes_an_pending = 1;
6315 mod_timer(&bp->timer, jiffies + bp->current_interval);
6318 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6319 bmcr &= ~BMCR_LOOPBACK;
6320 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
6322 spin_unlock_bh(&bp->phy_lock);
6328 bnx2_get_eeprom_len(struct net_device *dev)
6330 struct bnx2 *bp = netdev_priv(dev);
6332 if (bp->flash_info == NULL)
6335 return (int) bp->flash_size;
6339 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6342 struct bnx2 *bp = netdev_priv(dev);
6345 /* parameters already validated in ethtool_get_eeprom */
6347 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6353 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6356 struct bnx2 *bp = netdev_priv(dev);
6359 /* parameters already validated in ethtool_set_eeprom */
6361 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6367 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6369 struct bnx2 *bp = netdev_priv(dev);
6371 memset(coal, 0, sizeof(struct ethtool_coalesce));
6373 coal->rx_coalesce_usecs = bp->rx_ticks;
6374 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
6375 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
6376 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
6378 coal->tx_coalesce_usecs = bp->tx_ticks;
6379 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
6380 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
6381 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
6383 coal->stats_block_coalesce_usecs = bp->stats_ticks;
6389 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6391 struct bnx2 *bp = netdev_priv(dev);
6393 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
6394 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
6396 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
6397 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
6399 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
6400 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
6402 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
6403 if (bp->rx_quick_cons_trip_int > 0xff)
6404 bp->rx_quick_cons_trip_int = 0xff;
6406 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
6407 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
6409 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
6410 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
6412 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
6413 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
6415 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
6416 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int = 0xff;
6419 bp->stats_ticks = coal->stats_block_coalesce_usecs;
6420 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
6421 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
6422 bp->stats_ticks = USEC_PER_SEC;
6424 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
6425 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6426 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6428 if (netif_running(bp->dev)) {
6429 bnx2_netif_stop(bp);
6430 bnx2_init_nic(bp, 0);
6431 bnx2_netif_start(bp);
6438 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6440 struct bnx2 *bp = netdev_priv(dev);
6442 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
6443 ering->rx_mini_max_pending = 0;
6444 ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
6446 ering->rx_pending = bp->rx_ring_size;
6447 ering->rx_mini_pending = 0;
6448 ering->rx_jumbo_pending = bp->rx_pg_ring_size;
6450 ering->tx_max_pending = MAX_TX_DESC_CNT;
6451 ering->tx_pending = bp->tx_ring_size;
6455 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
6457 if (netif_running(bp->dev)) {
6458 bnx2_netif_stop(bp);
6459 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6464 bnx2_set_rx_ring_size(bp, rx);
6465 bp->tx_ring_size = tx;
6467 if (netif_running(bp->dev)) {
6470 rc = bnx2_alloc_mem(bp);
6473 bnx2_init_nic(bp, 0);
6474 bnx2_netif_start(bp);
6480 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6482 struct bnx2 *bp = netdev_priv(dev);
6485 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
6486 (ering->tx_pending > MAX_TX_DESC_CNT) ||
6487 (ering->tx_pending <= MAX_SKB_FRAGS)) {
6491 rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
6496 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6498 struct bnx2 *bp = netdev_priv(dev);
6500 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
6501 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
6502 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
6506 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6508 struct bnx2 *bp = netdev_priv(dev);
6510 bp->req_flow_ctrl = 0;
6511 if (epause->rx_pause)
6512 bp->req_flow_ctrl |= FLOW_CTRL_RX;
6513 if (epause->tx_pause)
6514 bp->req_flow_ctrl |= FLOW_CTRL_TX;
6516 if (epause->autoneg) {
6517 bp->autoneg |= AUTONEG_FLOW_CTRL;
6520 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
6523 spin_lock_bh(&bp->phy_lock);
6525 bnx2_setup_phy(bp, bp->phy_port);
6527 spin_unlock_bh(&bp->phy_lock);
6533 bnx2_get_rx_csum(struct net_device *dev)
6535 struct bnx2 *bp = netdev_priv(dev);
6541 bnx2_set_rx_csum(struct net_device *dev, u32 data)
6543 struct bnx2 *bp = netdev_priv(dev);
6550 bnx2_set_tso(struct net_device *dev, u32 data)
6552 struct bnx2 *bp = netdev_priv(dev);
6555 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6556 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6557 dev->features |= NETIF_F_TSO6;
6559 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
6564 #define BNX2_NUM_STATS 46
6567 char string[ETH_GSTRING_LEN];
6568 } bnx2_stats_str_arr[BNX2_NUM_STATS] = {
6570 { "rx_error_bytes" },
6572 { "tx_error_bytes" },
6573 { "rx_ucast_packets" },
6574 { "rx_mcast_packets" },
6575 { "rx_bcast_packets" },
6576 { "tx_ucast_packets" },
6577 { "tx_mcast_packets" },
6578 { "tx_bcast_packets" },
6579 { "tx_mac_errors" },
6580 { "tx_carrier_errors" },
6581 { "rx_crc_errors" },
6582 { "rx_align_errors" },
6583 { "tx_single_collisions" },
6584 { "tx_multi_collisions" },
6586 { "tx_excess_collisions" },
6587 { "tx_late_collisions" },
6588 { "tx_total_collisions" },
6591 { "rx_undersize_packets" },
6592 { "rx_oversize_packets" },
6593 { "rx_64_byte_packets" },
6594 { "rx_65_to_127_byte_packets" },
6595 { "rx_128_to_255_byte_packets" },
6596 { "rx_256_to_511_byte_packets" },
6597 { "rx_512_to_1023_byte_packets" },
6598 { "rx_1024_to_1522_byte_packets" },
6599 { "rx_1523_to_9022_byte_packets" },
6600 { "tx_64_byte_packets" },
6601 { "tx_65_to_127_byte_packets" },
6602 { "tx_128_to_255_byte_packets" },
6603 { "tx_256_to_511_byte_packets" },
6604 { "tx_512_to_1023_byte_packets" },
6605 { "tx_1024_to_1522_byte_packets" },
6606 { "tx_1523_to_9022_byte_packets" },
6607 { "rx_xon_frames" },
6608 { "rx_xoff_frames" },
6609 { "tx_xon_frames" },
6610 { "tx_xoff_frames" },
6611 { "rx_mac_ctrl_frames" },
6612 { "rx_filtered_packets" },
6614 { "rx_fw_discards" },
6617 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
6619 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
6620 STATS_OFFSET32(stat_IfHCInOctets_hi),
6621 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
6622 STATS_OFFSET32(stat_IfHCOutOctets_hi),
6623 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
6624 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
6625 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
6626 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
6627 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
6628 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
6629 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
6630 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
6631 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
6632 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
6633 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
6634 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
6635 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
6636 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
6637 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
6638 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
6639 STATS_OFFSET32(stat_EtherStatsCollisions),
6640 STATS_OFFSET32(stat_EtherStatsFragments),
6641 STATS_OFFSET32(stat_EtherStatsJabbers),
6642 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
6643 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
6644 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
6645 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
6646 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
6647 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
6648 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
6649 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
6650 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
6651 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
6652 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
6653 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
6654 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
6655 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
6656 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
6657 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
6658 STATS_OFFSET32(stat_XonPauseFramesReceived),
6659 STATS_OFFSET32(stat_XoffPauseFramesReceived),
6660 STATS_OFFSET32(stat_OutXonSent),
6661 STATS_OFFSET32(stat_OutXoffSent),
6662 STATS_OFFSET32(stat_MacControlFramesReceived),
6663 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
6664 STATS_OFFSET32(stat_IfInMBUFDiscards),
6665 STATS_OFFSET32(stat_FwRxDrop),
6668 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
6669 * skipped because of errata.
6671 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
6672 8,0,8,8,8,8,8,8,8,8,
6673 4,0,4,4,4,4,4,4,4,4,
6674 4,4,4,4,4,4,4,4,4,4,
6675 4,4,4,4,4,4,4,4,4,4,
6679 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
6680 8,0,8,8,8,8,8,8,8,8,
6681 4,4,4,4,4,4,4,4,4,4,
6682 4,4,4,4,4,4,4,4,4,4,
6683 4,4,4,4,4,4,4,4,4,4,
6687 #define BNX2_NUM_TESTS 6
6690 char string[ETH_GSTRING_LEN];
6691 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
6692 { "register_test (offline)" },
6693 { "memory_test (offline)" },
6694 { "loopback_test (offline)" },
6695 { "nvram_test (online)" },
6696 { "interrupt_test (online)" },
6697 { "link_test (online)" },
6701 bnx2_get_sset_count(struct net_device *dev, int sset)
6705 return BNX2_NUM_TESTS;
6707 return BNX2_NUM_STATS;
6714 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
6716 struct bnx2 *bp = netdev_priv(dev);
6718 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
6719 if (etest->flags & ETH_TEST_FL_OFFLINE) {
6722 bnx2_netif_stop(bp);
6723 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
6726 if (bnx2_test_registers(bp) != 0) {
6728 etest->flags |= ETH_TEST_FL_FAILED;
6730 if (bnx2_test_memory(bp) != 0) {
6732 etest->flags |= ETH_TEST_FL_FAILED;
6734 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
6735 etest->flags |= ETH_TEST_FL_FAILED;
6737 if (!netif_running(bp->dev)) {
6738 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6741 bnx2_init_nic(bp, 1);
6742 bnx2_netif_start(bp);
6745 /* wait for link up */
6746 for (i = 0; i < 7; i++) {
6749 msleep_interruptible(1000);
6753 if (bnx2_test_nvram(bp) != 0) {
6755 etest->flags |= ETH_TEST_FL_FAILED;
6757 if (bnx2_test_intr(bp) != 0) {
6759 etest->flags |= ETH_TEST_FL_FAILED;
6762 if (bnx2_test_link(bp) != 0) {
6764 etest->flags |= ETH_TEST_FL_FAILED;
6770 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6772 switch (stringset) {
6774 memcpy(buf, bnx2_stats_str_arr,
6775 sizeof(bnx2_stats_str_arr));
6778 memcpy(buf, bnx2_tests_str_arr,
6779 sizeof(bnx2_tests_str_arr));
6785 bnx2_get_ethtool_stats(struct net_device *dev,
6786 struct ethtool_stats *stats, u64 *buf)
6788 struct bnx2 *bp = netdev_priv(dev);
6790 u32 *hw_stats = (u32 *) bp->stats_blk;
6791 u8 *stats_len_arr = NULL;
6793 if (hw_stats == NULL) {
6794 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
6798 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
6799 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
6800 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
6801 (CHIP_ID(bp) == CHIP_ID_5708_A0))
6802 stats_len_arr = bnx2_5706_stats_len_arr;
6804 stats_len_arr = bnx2_5708_stats_len_arr;
6806 for (i = 0; i < BNX2_NUM_STATS; i++) {
6807 if (stats_len_arr[i] == 0) {
6808 /* skip this counter */
6812 if (stats_len_arr[i] == 4) {
6813 /* 4-byte counter */
6815 *(hw_stats + bnx2_stats_offset_arr[i]);
6818 /* 8-byte counter */
6819 buf[i] = (((u64) *(hw_stats +
6820 bnx2_stats_offset_arr[i])) << 32) +
6821 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
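/*
 * bnx2_stats_len_arr encodes the width of each hardware counter: 0 means
 * "skip, known-bad on this chip", 4 means a single 32-bit word, and 8 means
 * a _hi/_lo pair that is reassembled into one u64 for ethtool.  E.g. with
 * hi = 0x00000001 and lo = 0x00000002 the reported value is
 * ((u64) 0x1 << 32) + 0x2 = 4294967298.
 */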
6826 bnx2_phys_id(struct net_device *dev, u32 data)
6828 struct bnx2 *bp = netdev_priv(dev);
6835 save = REG_RD(bp, BNX2_MISC_CFG);
6836 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
6838 for (i = 0; i < (data * 2); i++) {
6840 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
6843 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
6844 BNX2_EMAC_LED_1000MB_OVERRIDE |
6845 BNX2_EMAC_LED_100MB_OVERRIDE |
6846 BNX2_EMAC_LED_10MB_OVERRIDE |
6847 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
6848 BNX2_EMAC_LED_TRAFFIC);
6850 msleep_interruptible(500);
6851 if (signal_pending(current))
6854 REG_WR(bp, BNX2_EMAC_LED, 0);
6855 REG_WR(bp, BNX2_MISC_CFG, save);
6860 bnx2_set_tx_csum(struct net_device *dev, u32 data)
6862 struct bnx2 *bp = netdev_priv(dev);
6864 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6865 return (ethtool_op_set_tx_ipv6_csum(dev, data));
6867 return (ethtool_op_set_tx_csum(dev, data));
6870 static const struct ethtool_ops bnx2_ethtool_ops = {
6871 .get_settings = bnx2_get_settings,
6872 .set_settings = bnx2_set_settings,
6873 .get_drvinfo = bnx2_get_drvinfo,
6874 .get_regs_len = bnx2_get_regs_len,
6875 .get_regs = bnx2_get_regs,
6876 .get_wol = bnx2_get_wol,
6877 .set_wol = bnx2_set_wol,
6878 .nway_reset = bnx2_nway_reset,
6879 .get_link = ethtool_op_get_link,
6880 .get_eeprom_len = bnx2_get_eeprom_len,
6881 .get_eeprom = bnx2_get_eeprom,
6882 .set_eeprom = bnx2_set_eeprom,
6883 .get_coalesce = bnx2_get_coalesce,
6884 .set_coalesce = bnx2_set_coalesce,
6885 .get_ringparam = bnx2_get_ringparam,
6886 .set_ringparam = bnx2_set_ringparam,
6887 .get_pauseparam = bnx2_get_pauseparam,
6888 .set_pauseparam = bnx2_set_pauseparam,
6889 .get_rx_csum = bnx2_get_rx_csum,
6890 .set_rx_csum = bnx2_set_rx_csum,
6891 .set_tx_csum = bnx2_set_tx_csum,
6892 .set_sg = ethtool_op_set_sg,
6893 .set_tso = bnx2_set_tso,
6894 .self_test = bnx2_self_test,
6895 .get_strings = bnx2_get_strings,
6896 .phys_id = bnx2_phys_id,
6897 .get_ethtool_stats = bnx2_get_ethtool_stats,
6898 .get_sset_count = bnx2_get_sset_count,
6901 /* Called with rtnl_lock */
6903 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6905 struct mii_ioctl_data *data = if_mii(ifr);
6906 struct bnx2 *bp = netdev_priv(dev);
6911 data->phy_id = bp->phy_addr;
6917 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
6920 if (!netif_running(dev))
6923 spin_lock_bh(&bp->phy_lock);
6924 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
6925 spin_unlock_bh(&bp->phy_lock);
6927 data->val_out = mii_regval;
6933 if (!capable(CAP_NET_ADMIN))
6936 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
6939 if (!netif_running(dev))
6942 spin_lock_bh(&bp->phy_lock);
6943 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
6944 spin_unlock_bh(&bp->phy_lock);
6955 /* Called with rtnl_lock */
6957 bnx2_change_mac_addr(struct net_device *dev, void *p)
6959 struct sockaddr *addr = p;
6960 struct bnx2 *bp = netdev_priv(dev);
6962 if (!is_valid_ether_addr(addr->sa_data))
6965 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6966 if (netif_running(dev))
6967 bnx2_set_mac_addr(bp);
6972 /* Called with rtnl_lock */
6974 bnx2_change_mtu(struct net_device *dev, int new_mtu)
6976 struct bnx2 *bp = netdev_priv(dev);
6978 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
6979 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
6983 return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
6986 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6988 poll_bnx2(struct net_device *dev)
6990 struct bnx2 *bp = netdev_priv(dev);
6992 disable_irq(bp->pdev->irq);
6993 bnx2_interrupt(bp->pdev->irq, dev);
6994 enable_irq(bp->pdev->irq);
6998 static void __devinit
6999 bnx2_get_5709_media(struct bnx2 *bp)
7001 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7002 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7005 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7007 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7008 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7012 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7013 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7015 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7017 if (PCI_FUNC(bp->pdev->devfn) == 0) {
7022 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7030 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7036 static void __devinit
7037 bnx2_get_pci_speed(struct bnx2 *bp)
7041 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
7042 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7045 bp->flags |= BNX2_FLAG_PCIX;
7047 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7049 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
7051 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
7052 bp->bus_speed_mhz = 133;
7055 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
7056 bp->bus_speed_mhz = 100;
7059 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
7060 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
7061 bp->bus_speed_mhz = 66;
7064 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
7065 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
7066 bp->bus_speed_mhz = 50;
7069 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
7070 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
7071 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
7072 bp->bus_speed_mhz = 33;
7077 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
7078 bp->bus_speed_mhz = 66;
7080 bp->bus_speed_mhz = 33;
7083 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
7084 bp->flags |= BNX2_FLAG_PCI_32BIT;
7088 static int __devinit
7089 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7092 unsigned long mem_len;
7095 u64 dma_mask, persist_dma_mask;
7097 SET_NETDEV_DEV(dev, &pdev->dev);
7098 bp = netdev_priv(dev);
7103 /* enable device (incl. PCI PM wakeup), and bus-mastering */
7104 rc = pci_enable_device(pdev);
7106 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
7110 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7112 "Cannot find PCI device base address, aborting.\n");
7114 goto err_out_disable;
7117 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7119 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
7120 goto err_out_disable;
7123 pci_set_master(pdev);
7124 pci_save_state(pdev);
7126 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7127 if (bp->pm_cap == 0) {
7129 "Cannot find power management capability, aborting.\n");
7131 goto err_out_release;
7137 spin_lock_init(&bp->phy_lock);
7138 spin_lock_init(&bp->indirect_lock);
7139 INIT_WORK(&bp->reset_task, bnx2_reset_task);
7141 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7142 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
7143 dev->mem_end = dev->mem_start + mem_len;
7144 dev->irq = pdev->irq;
7146 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7149 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
7151 goto err_out_release;
7154 /* Configure byte swap and enable write to the reg_window registers.
7155 * Rely on the CPU to do target byte swapping on big endian systems;
7156 * the chip's target access swapping will not swap all accesses.
7158 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7159 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7160 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7162 bnx2_set_power_state(bp, PCI_D0);
7164 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7166 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7167 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7169 "Cannot find PCIE capability, aborting.\n");
7173 bp->flags |= BNX2_FLAG_PCIE;
7174 if (CHIP_REV(bp) == CHIP_REV_Ax)
7175 bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7177 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7178 if (bp->pcix_cap == 0) {
7180 "Cannot find PCIX capability, aborting.\n");
7186 if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7187 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7188 bp->flags |= BNX2_FLAG_MSIX_CAP;
7191 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7192 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
7193 bp->flags |= BNX2_FLAG_MSI_CAP;
7196 /* 5708 cannot support DMA addresses > 40-bit. */
7197 if (CHIP_NUM(bp) == CHIP_NUM_5708)
7198 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
7200 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
7202 /* Configure DMA attributes. */
7203 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7204 dev->features |= NETIF_F_HIGHDMA;
7205 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7208 "pci_set_consistent_dma_mask failed, aborting.\n");
7211 } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
7212 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
7216 if (!(bp->flags & BNX2_FLAG_PCIE))
7217 bnx2_get_pci_speed(bp);
7219 /* 5706A0 may falsely detect SERR and PERR. */
7220 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7221 reg = REG_RD(bp, PCI_COMMAND);
7222 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7223 REG_WR(bp, PCI_COMMAND, reg);
7225 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
7226 !(bp->flags & BNX2_FLAG_PCIX)) {
7229 "5706 A1 can only be used in a PCIX bus, aborting.\n");
7233 bnx2_init_nvram(bp);
7235 reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
7237 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
7238 BNX2_SHM_HDR_SIGNATURE_SIG) {
7239 u32 off = PCI_FUNC(pdev->devfn) << 2;
7241 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
7243 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
7245 /* Get the permanent MAC address. First we need to make sure the
7246 * firmware is actually running.
7248 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
7250 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
7251 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
7252 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
7257 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
7258 for (i = 0, j = 0; i < 3; i++) {
7261 num = (u8) (reg >> (24 - (i * 8)));
7262 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
7263 if (num >= k || !skip0 || k == 1) {
7264 bp->fw_version[j++] = (num / k) + '0';
7269 bp->fw_version[j++] = '.';
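/*
 * The digit loop prints one bootcode version byte in decimal without
 * leading zeros: num = 7 emits just "7", num = 30 emits "3" then "0", and
 * num = 0 still emits a single "0" because the k == 1 digit is always
 * written.  The three bytes separated by '.' give strings like "4.6.0".
 */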
7271 reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
7272 if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
7275 if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
7276 bp->flags |= BNX2_FLAG_ASF_ENABLE;
7278 for (i = 0; i < 30; i++) {
7279 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7280 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
7285 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7286 reg &= BNX2_CONDITION_MFW_RUN_MASK;
7287 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
7288 reg != BNX2_CONDITION_MFW_RUN_NONE) {
7290 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
7292 bp->fw_version[j++] = ' ';
7293 for (i = 0; i < 3; i++) {
7294 reg = bnx2_reg_rd_ind(bp, addr + i * 4);
7296 memcpy(&bp->fw_version[j], &reg, 4);
7301 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
7302 bp->mac_addr[0] = (u8) (reg >> 8);
7303 bp->mac_addr[1] = (u8) reg;
7305 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
7306 bp->mac_addr[2] = (u8) (reg >> 24);
7307 bp->mac_addr[3] = (u8) (reg >> 16);
7308 bp->mac_addr[4] = (u8) (reg >> 8);
7309 bp->mac_addr[5] = (u8) reg;
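/*
 * The station address is unpacked from two shared memory words, most
 * significant byte first: MAC_UPPER keeps the top two bytes in its low 16
 * bits and MAC_LOWER the remaining four.  For example upper = 0x00000010
 * and lower = 0x18a1b2c3 yield the address 00:10:18:a1:b2:c3.
 */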
7311 bp->tx_ring_size = MAX_TX_DESC_CNT;
7312 bnx2_set_rx_ring_size(bp, 255);
7316 bp->tx_quick_cons_trip_int = 20;
7317 bp->tx_quick_cons_trip = 20;
7318 bp->tx_ticks_int = 80;
7321 bp->rx_quick_cons_trip_int = 6;
7322 bp->rx_quick_cons_trip = 6;
7323 bp->rx_ticks_int = 18;
7326 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7328 bp->timer_interval = HZ;
7329 bp->current_interval = HZ;
7333 /* Disable WOL support if we are running on a SERDES chip. */
7334 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7335 bnx2_get_5709_media(bp);
7336 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
7337 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7339 bp->phy_port = PORT_TP;
7340 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7341 bp->phy_port = PORT_FIBRE;
7342 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
7343 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
7344 bp->flags |= BNX2_FLAG_NO_WOL;
7347 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
7348 /* Don't do parallel detect on this board because of
7349 * some board problems. The link will not go down
7350 * if we do parallel detect.
7352 if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
7353 pdev->subsystem_device == 0x310c)
7354 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
7357 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
7358 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
7360 bnx2_init_remote_phy(bp);
7362 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
7363 CHIP_NUM(bp) == CHIP_NUM_5708)
7364 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
7365 else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
7366 (CHIP_REV(bp) == CHIP_REV_Ax ||
7367 CHIP_REV(bp) == CHIP_REV_Bx))
7368 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
7370 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
7371 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
7372 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
7373 bp->flags |= BNX2_FLAG_NO_WOL;
7377 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7378 bp->tx_quick_cons_trip_int =
7379 bp->tx_quick_cons_trip;
7380 bp->tx_ticks_int = bp->tx_ticks;
7381 bp->rx_quick_cons_trip_int =
7382 bp->rx_quick_cons_trip;
7383 bp->rx_ticks_int = bp->rx_ticks;
7384 bp->comp_prod_trip_int = bp->comp_prod_trip;
7385 bp->com_ticks_int = bp->com_ticks;
7386 bp->cmd_ticks_int = bp->cmd_ticks;
7389 /* Disable MSI on 5706 if AMD 8132 bridge is found.
7391 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
7392 * with byte enables disabled on the unused 32-bit word. This is legal
7393 * but causes problems on the AMD 8132, which will eventually stop
7394 * responding.
7396 * AMD believes this incompatibility is unique to the 5706, and
7397 * prefers to locally disable MSI rather than globally disabling it.
7399 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
7400 struct pci_dev *amd_8132 = NULL;
7402 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
7403 PCI_DEVICE_ID_AMD_8132_BRIDGE,
7406 if (amd_8132->revision >= 0x10 &&
7407 amd_8132->revision <= 0x13) {
7408 disable_msi = 1;
7409 pci_dev_put(amd_8132);
7415 bnx2_set_default_link(bp);
7416 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
7418 init_timer(&bp->timer);
7419 bp->timer.expires = RUN_AT(bp->timer_interval);
7420 bp->timer.data = (unsigned long) bp;
7421 bp->timer.function = bnx2_timer;
7427 iounmap(bp->regview);
7432 pci_release_regions(pdev);
7435 pci_disable_device(pdev);
7436 pci_set_drvdata(pdev, NULL);
7442 static char * __devinit
7443 bnx2_bus_string(struct bnx2 *bp, char *str)
7447 if (bp->flags & BNX2_FLAG_PCIE) {
7448 s += sprintf(s, "PCI Express");
7450 s += sprintf(s, "PCI");
7451 if (bp->flags & BNX2_FLAG_PCIX)
7452 s += sprintf(s, "-X");
7453 if (bp->flags & BNX2_FLAG_PCI_32BIT)
7454 s += sprintf(s, " 32-bit");
7456 s += sprintf(s, " 64-bit");
7457 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
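/* Editor's note: bnx2_bus_string() builds strings such as "PCI Express",
 * "PCI 64-bit 66MHz" or "PCI-X 64-bit 133MHz" (example outputs derived
 * from the sprintf calls above) for the probe banner in bnx2_init_one().
 */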
7462 static void __devinit
7463 bnx2_init_napi(struct bnx2 *bp)
7466 struct bnx2_napi *bnapi;
7468 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
7469 bnapi = &bp->bnx2_napi[i];
7472 netif_napi_add(bp->dev, &bp->bnx2_napi[0].napi, bnx2_poll, 64);
7473 netif_napi_add(bp->dev, &bp->bnx2_napi[BNX2_TX_VEC].napi, bnx2_tx_poll,
7474 64);
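/* Editor's note: the final argument to netif_napi_add() is the NAPI
 * weight, i.e. the maximum number of packets one poll call may process
 * before yielding back to the stack; 64 is the customary default for
 * gigabit drivers.
 */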
7477 static int __devinit
7478 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7480 static int version_printed = 0;
7481 struct net_device *dev = NULL;
7485 DECLARE_MAC_BUF(mac);
7487 if (version_printed++ == 0)
7488 printk(KERN_INFO "%s", version);
7490 /* dev zeroed in init_etherdev */
7491 dev = alloc_etherdev(sizeof(*bp));
7496 rc = bnx2_init_board(pdev, dev);
7502 dev->open = bnx2_open;
7503 dev->hard_start_xmit = bnx2_start_xmit;
7504 dev->stop = bnx2_close;
7505 dev->get_stats = bnx2_get_stats;
7506 dev->set_multicast_list = bnx2_set_rx_mode;
7507 dev->do_ioctl = bnx2_ioctl;
7508 dev->set_mac_address = bnx2_change_mac_addr;
7509 dev->change_mtu = bnx2_change_mtu;
7510 dev->tx_timeout = bnx2_tx_timeout;
7511 dev->watchdog_timeo = TX_TIMEOUT;
7513 dev->vlan_rx_register = bnx2_vlan_rx_register;
7515 dev->ethtool_ops = &bnx2_ethtool_ops;
7517 bp = netdev_priv(dev);
7520 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
7521 dev->poll_controller = poll_bnx2;
7524 pci_set_drvdata(pdev, dev);
7526 memcpy(dev->dev_addr, bp->mac_addr, 6);
7527 memcpy(dev->perm_addr, bp->mac_addr, 6);
7528 bp->name = board_info[ent->driver_data].name;
7530 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
7531 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7532 dev->features |= NETIF_F_IPV6_CSUM;
7535 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7537 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
7538 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7539 dev->features |= NETIF_F_TSO6;
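/* Editor's note: checksum offload and scatter-gather are enabled for all
 * chips, with TSO/TSO_ECN on top; the IPv6 checksum and TSO6 variants are
 * enabled only on the 5709, the one chip in this family given IPv6
 * offload here (see the CHIP_NUM checks above).
 */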
7541 if ((rc = register_netdev(dev))) {
7542 dev_err(&pdev->dev, "Cannot register net device\n");
7544 iounmap(bp->regview);
7545 pci_release_regions(pdev);
7546 pci_disable_device(pdev);
7547 pci_set_drvdata(pdev, NULL);
7552 printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
7553 "IRQ %d, node addr %s\n",
7556 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
7557 ((CHIP_ID(bp) & 0x0ff0) >> 4),
7558 bnx2_bus_string(bp, str),
7560 bp->pdev->irq, print_mac(mac, dev->dev_addr));
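/* Editor's note: with the format string above the probe banner comes out
 * roughly as "eth0: Broadcom NetXtreme II BCM5708 1000Base-T (B2) PCI-X
 * 64-bit 133MHz found at mem d8000000, IRQ 16, node addr 00:10:18:xx:xx:xx"
 * (all values here are hypothetical and shown only to illustrate the
 * format).
 */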
7565 static void __devexit
7566 bnx2_remove_one(struct pci_dev *pdev)
7568 struct net_device *dev = pci_get_drvdata(pdev);
7569 struct bnx2 *bp = netdev_priv(dev);
7571 flush_scheduled_work();
7573 unregister_netdev(dev);
7576 iounmap(bp->regview);
7579 pci_release_regions(pdev);
7580 pci_disable_device(pdev);
7581 pci_set_drvdata(pdev, NULL);
7585 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
7587 struct net_device *dev = pci_get_drvdata(pdev);
7588 struct bnx2 *bp = netdev_priv(dev);
7591 /* PCI register 4 needs to be saved whether netif_running() or not.
7592 * MSI address and data need to be saved if using MSI and
7593 * netif_running().
7594 */
7595 pci_save_state(pdev);
7596 if (!netif_running(dev))
7599 flush_scheduled_work();
7600 bnx2_netif_stop(bp);
7601 netif_device_detach(dev);
7602 del_timer_sync(&bp->timer);
7603 if (bp->flags & BNX2_FLAG_NO_WOL)
7604 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
7605 else if (bp->wol)
7606 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
7607 else
7608 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
7609 bnx2_reset_chip(bp, reset_code);
7611 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
7616 bnx2_resume(struct pci_dev *pdev)
7618 struct net_device *dev = pci_get_drvdata(pdev);
7619 struct bnx2 *bp = netdev_priv(dev);
7621 pci_restore_state(pdev);
7622 if (!netif_running(dev))
7625 bnx2_set_power_state(bp, PCI_D0);
7626 netif_device_attach(dev);
7627 bnx2_init_nic(bp, 1);
7628 bnx2_netif_start(bp);
7633 * bnx2_io_error_detected - called when PCI error is detected
7634 * @pdev: Pointer to PCI device
7635 * @state: The current pci connection state
7637 * This function is called after a PCI bus error affecting
7638 * this device has been detected.
7640 static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
7641 pci_channel_state_t state)
7643 struct net_device *dev = pci_get_drvdata(pdev);
7644 struct bnx2 *bp = netdev_priv(dev);
7647 netif_device_detach(dev);
7649 if (netif_running(dev)) {
7650 bnx2_netif_stop(bp);
7651 del_timer_sync(&bp->timer);
7652 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
7655 pci_disable_device(pdev);
7658 /* Request a slot reset. */
7659 return PCI_ERS_RESULT_NEED_RESET;
7663 * bnx2_io_slot_reset - called after the pci bus has been reset.
7664 * @pdev: Pointer to PCI device
7666 * Restart the card from scratch, as if from a cold-boot.
7668 static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
7670 struct net_device *dev = pci_get_drvdata(pdev);
7671 struct bnx2 *bp = netdev_priv(dev);
7674 if (pci_enable_device(pdev)) {
7676 "Cannot re-enable PCI device after reset.\n");
7678 return PCI_ERS_RESULT_DISCONNECT;
7680 pci_set_master(pdev);
7681 pci_restore_state(pdev);
7683 if (netif_running(dev)) {
7684 bnx2_set_power_state(bp, PCI_D0);
7685 bnx2_init_nic(bp, 1);
7689 return PCI_ERS_RESULT_RECOVERED;
7693 * bnx2_io_resume - called when traffic can start flowing again.
7694 * @pdev: Pointer to PCI device
7696 * This callback is called when the error recovery driver tells us that
7697 * it is OK to resume normal operation.
7699 static void bnx2_io_resume(struct pci_dev *pdev)
7701 struct net_device *dev = pci_get_drvdata(pdev);
7702 struct bnx2 *bp = netdev_priv(dev);
7705 if (netif_running(dev))
7706 bnx2_netif_start(bp);
7708 netif_device_attach(dev);
7712 static struct pci_error_handlers bnx2_err_handler = {
7713 .error_detected = bnx2_io_error_detected,
7714 .slot_reset = bnx2_io_slot_reset,
7715 .resume = bnx2_io_resume,
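/* Editor's note: these callbacks follow the standard PCI error recovery
 * sequence: the core calls .error_detected when a bus error is reported
 * (returning PCI_ERS_RESULT_NEED_RESET above asks for a slot reset), then
 * .slot_reset after the reset so the device can be re-enabled and
 * re-initialized, and finally .resume once traffic may flow again.
 */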
7718 static struct pci_driver bnx2_pci_driver = {
7719 .name = DRV_MODULE_NAME,
7720 .id_table = bnx2_pci_tbl,
7721 .probe = bnx2_init_one,
7722 .remove = __devexit_p(bnx2_remove_one),
7723 .suspend = bnx2_suspend,
7724 .resume = bnx2_resume,
7725 .err_handler = &bnx2_err_handler,
7728 static int __init bnx2_init(void)
7730 return pci_register_driver(&bnx2_pci_driver);
7733 static void __exit bnx2_cleanup(void)
7735 pci_unregister_driver(&bnx2_pci_driver);
7738 module_init(bnx2_init);
7739 module_exit(bnx2_cleanup);