1 /* bnx2.c: Broadcom NX2 network driver.
3 * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Written by: Michael Chan (mchan@broadcom.com)
15 #define DRV_MODULE_NAME "bnx2"
16 #define PFX DRV_MODULE_NAME ": "
17 #define DRV_MODULE_VERSION "1.4.38"
18 #define DRV_MODULE_RELDATE "February 10, 2006"
20 #define RUN_AT(x) (jiffies + (x))
22 /* Time in jiffies before concluding the transmitter is hung. */
23 #define TX_TIMEOUT (5*HZ)
25 static char version[] __devinitdata =
26 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
28 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
29 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
30 MODULE_LICENSE("GPL");
31 MODULE_VERSION(DRV_MODULE_VERSION);
33 static int disable_msi = 0;
35 module_param(disable_msi, int, 0);
36 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
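/* Usage example (standard module-parameter handling, not bnx2-specific):
 * loading the driver with "modprobe bnx2 disable_msi=1" keeps the device
 * on legacy INTx interrupts on systems where MSI delivery is unreliable.
 */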
48 /* indexed by board_t, above */
51 } board_info[] __devinitdata = {
52 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
53 { "HP NC370T Multifunction Gigabit Server Adapter" },
54 { "HP NC370i Multifunction Gigabit Server Adapter" },
55 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
56 { "HP NC370F Multifunction Gigabit Server Adapter" },
57 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
58 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
61 static struct pci_device_id bnx2_pci_tbl[] = {
62 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
63 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
64 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
65 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
66 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
67 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
68 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
69 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
70 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
71 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
72 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
73 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
74 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
75 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
79 static struct flash_spec flash_table[] =
82 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
83 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
84 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
86 /* Expansion entry 0001 */
87 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
88 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
89 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
91 /* Saifun SA25F010 (non-buffered flash) */
92 /* strap, cfg1, & write1 need updates */
93 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
94 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
95 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
96 "Non-buffered flash (128kB)"},
97 /* Saifun SA25F020 (non-buffered flash) */
98 /* strap, cfg1, & write1 need updates */
99 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
100 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
101 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
102 "Non-buffered flash (256kB)"},
103 /* Expansion entry 0100 */
104 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
105 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
106 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
108 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
109 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
110 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
111 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
112 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
113 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
114 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
115 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
116 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
117 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
118 /* Saifun SA25F005 (non-buffered flash) */
119 /* strap, cfg1, & write1 need updates */
120 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
121 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
122 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
123 "Non-buffered flash (64kB)"},
125 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
126 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
127 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
129 /* Expansion entry 1001 */
130 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
131 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
132 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
134 /* Expansion entry 1010 */
135 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
136 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
137 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
139 /* ATMEL AT45DB011B (buffered flash) */
140 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
141 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
142 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
143 "Buffered flash (128kB)"},
144 /* Expansion entry 1100 */
145 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
146 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
147 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
149 /* Expansion entry 1101 */
150 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
151 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
152 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
154 /* Atmel Expansion entry 1110 */
155 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
156 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
157 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
158 "Entry 1110 (Atmel)"},
159 /* ATMEL AT45DB021B (buffered flash) */
160 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
161 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
162 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
163 "Buffered flash (256kB)"},
166 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
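/* Number of free TX descriptors.  The producer and consumer indices are
 * reduced modulo the ring page size; when the subtraction wraps, the
 * result is re-masked (the extra "- 1" is assumed to account for the
 * chain BD that terminates each ring page).  The return value is the
 * ring size minus the number of in-flight BDs.
 */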
168 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
170 u32 diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
172 if (diff > MAX_TX_DESC_CNT)
173 diff = (diff & MAX_TX_DESC_CNT) - 1;
174 return (bp->tx_ring_size - diff);
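/* Indirect register access: the target offset is first latched in the
 * PCICFG window address register, then the data is read or written
 * through the window register itself.
 */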
178 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
180 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
181 return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
185 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
187 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
188 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
192 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
195 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
196 REG_WR(bp, BNX2_CTX_DATA, val);
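/* MDIO access to the PHY: auto-polling is temporarily turned off if it
 * is enabled, a read or write command is issued with START_BUSY set,
 * and the COMM register is polled until the busy bit clears before
 * auto-polling is restored.
 */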
200 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
205 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
206 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
207 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
209 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
210 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
215 val1 = (bp->phy_addr << 21) | (reg << 16) |
216 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
217 BNX2_EMAC_MDIO_COMM_START_BUSY;
218 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
220 for (i = 0; i < 50; i++) {
223 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
224 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
227 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
228 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
234 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
243 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
244 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
245 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
247 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
248 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
257 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
262 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
263 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
264 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
266 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
267 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
272 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
273 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
274 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
275 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
277 for (i = 0; i < 50; i++) {
280 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
281 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
287 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
292 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
293 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
294 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
296 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
297 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
306 bnx2_disable_int(struct bnx2 *bp)
308 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
309 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
310 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
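/* Re-enable interrupts: the first INT_ACK_CMD write acknowledges up to
 * the last seen status index while keeping interrupts masked, the second
 * write unmasks them, and the COAL_NOW bit in HC_COMMAND requests an
 * immediate status block update so events that arrived while masked are
 * not missed (intent assumed from the bit name).
 */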
314 bnx2_enable_int(struct bnx2 *bp)
316 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
317 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
318 BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);
320 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
321 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);
323 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
327 bnx2_disable_int_sync(struct bnx2 *bp)
329 atomic_inc(&bp->intr_sem);
330 bnx2_disable_int(bp);
331 synchronize_irq(bp->pdev->irq);
335 bnx2_netif_stop(struct bnx2 *bp)
337 bnx2_disable_int_sync(bp);
338 if (netif_running(bp->dev)) {
339 netif_poll_disable(bp->dev);
340 netif_tx_disable(bp->dev);
341 bp->dev->trans_start = jiffies; /* prevent tx timeout */
346 bnx2_netif_start(struct bnx2 *bp)
348 if (atomic_dec_and_test(&bp->intr_sem)) {
349 if (netif_running(bp->dev)) {
350 netif_wake_queue(bp->dev);
351 netif_poll_enable(bp->dev);
358 bnx2_free_mem(struct bnx2 *bp)
363 pci_free_consistent(bp->pdev, sizeof(struct statistics_block),
364 bp->stats_blk, bp->stats_blk_mapping);
365 bp->stats_blk = NULL;
367 if (bp->status_blk) {
368 pci_free_consistent(bp->pdev, sizeof(struct status_block),
369 bp->status_blk, bp->status_blk_mapping);
370 bp->status_blk = NULL;
372 if (bp->tx_desc_ring) {
373 pci_free_consistent(bp->pdev,
374 sizeof(struct tx_bd) * TX_DESC_CNT,
375 bp->tx_desc_ring, bp->tx_desc_mapping);
376 bp->tx_desc_ring = NULL;
378 kfree(bp->tx_buf_ring);
379 bp->tx_buf_ring = NULL;
380 for (i = 0; i < bp->rx_max_ring; i++) {
381 if (bp->rx_desc_ring[i])
382 pci_free_consistent(bp->pdev,
383 sizeof(struct rx_bd) * RX_DESC_CNT,
385 bp->rx_desc_mapping[i]);
386 bp->rx_desc_ring[i] = NULL;
388 vfree(bp->rx_buf_ring);
389 bp->rx_buf_ring = NULL;
393 bnx2_alloc_mem(struct bnx2 *bp)
397 bp->tx_buf_ring = kmalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
399 if (bp->tx_buf_ring == NULL)
402 memset(bp->tx_buf_ring, 0, sizeof(struct sw_bd) * TX_DESC_CNT);
403 bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
404 sizeof(struct tx_bd) *
406 &bp->tx_desc_mapping);
407 if (bp->tx_desc_ring == NULL)
410 bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
412 if (bp->rx_buf_ring == NULL)
415 memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
418 for (i = 0; i < bp->rx_max_ring; i++) {
419 bp->rx_desc_ring[i] =
420 pci_alloc_consistent(bp->pdev,
421 sizeof(struct rx_bd) * RX_DESC_CNT,
422 &bp->rx_desc_mapping[i]);
423 if (bp->rx_desc_ring[i] == NULL)
428 bp->status_blk = pci_alloc_consistent(bp->pdev,
429 sizeof(struct status_block),
430 &bp->status_blk_mapping);
431 if (bp->status_blk == NULL)
434 memset(bp->status_blk, 0, sizeof(struct status_block));
436 bp->stats_blk = pci_alloc_consistent(bp->pdev,
437 sizeof(struct statistics_block),
438 &bp->stats_blk_mapping);
439 if (bp->stats_blk == NULL)
442 memset(bp->stats_blk, 0, sizeof(struct statistics_block));
452 bnx2_report_fw_link(struct bnx2 *bp)
454 u32 fw_link_status = 0;
459 switch (bp->line_speed) {
461 if (bp->duplex == DUPLEX_HALF)
462 fw_link_status = BNX2_LINK_STATUS_10HALF;
464 fw_link_status = BNX2_LINK_STATUS_10FULL;
467 if (bp->duplex == DUPLEX_HALF)
468 fw_link_status = BNX2_LINK_STATUS_100HALF;
470 fw_link_status = BNX2_LINK_STATUS_100FULL;
473 if (bp->duplex == DUPLEX_HALF)
474 fw_link_status = BNX2_LINK_STATUS_1000HALF;
476 fw_link_status = BNX2_LINK_STATUS_1000FULL;
479 if (bp->duplex == DUPLEX_HALF)
480 fw_link_status = BNX2_LINK_STATUS_2500HALF;
482 fw_link_status = BNX2_LINK_STATUS_2500FULL;
486 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
489 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
491 bnx2_read_phy(bp, MII_BMSR, &bmsr);
492 bnx2_read_phy(bp, MII_BMSR, &bmsr);
494 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
495 bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
496 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
498 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
502 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
504 REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
508 bnx2_report_link(struct bnx2 *bp)
511 netif_carrier_on(bp->dev);
512 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
514 printk("%d Mbps ", bp->line_speed);
516 if (bp->duplex == DUPLEX_FULL)
517 printk("full duplex");
519 printk("half duplex");
522 if (bp->flow_ctrl & FLOW_CTRL_RX) {
523 printk(", receive ");
524 if (bp->flow_ctrl & FLOW_CTRL_TX)
525 printk("& transmit ");
528 printk(", transmit ");
530 printk("flow control ON");
535 netif_carrier_off(bp->dev);
536 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
539 bnx2_report_fw_link(bp);
543 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
545 u32 local_adv, remote_adv;
548 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
549 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
551 if (bp->duplex == DUPLEX_FULL) {
552 bp->flow_ctrl = bp->req_flow_ctrl;
557 if (bp->duplex != DUPLEX_FULL) {
561 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
562 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
565 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
566 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
567 bp->flow_ctrl |= FLOW_CTRL_TX;
568 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
569 bp->flow_ctrl |= FLOW_CTRL_RX;
573 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
574 bnx2_read_phy(bp, MII_LPA, &remote_adv);
576 if (bp->phy_flags & PHY_SERDES_FLAG) {
577 u32 new_local_adv = 0;
578 u32 new_remote_adv = 0;
580 if (local_adv & ADVERTISE_1000XPAUSE)
581 new_local_adv |= ADVERTISE_PAUSE_CAP;
582 if (local_adv & ADVERTISE_1000XPSE_ASYM)
583 new_local_adv |= ADVERTISE_PAUSE_ASYM;
584 if (remote_adv & ADVERTISE_1000XPAUSE)
585 new_remote_adv |= ADVERTISE_PAUSE_CAP;
586 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
587 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
589 local_adv = new_local_adv;
590 remote_adv = new_remote_adv;
593 /* See Table 28B-3 of 802.3ab-1999 spec. */
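/* Resolution summary as implemented below: symmetric PAUSE advertised on
 * both sides enables flow control in both directions; a local
 * asymmetric-only advertisement paired with a remote symmetric +
 * asymmetric advertisement enables transmit-only; a remote
 * asymmetric-only advertisement against a local symmetric + asymmetric
 * one enables receive-only.
 */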
594 if (local_adv & ADVERTISE_PAUSE_CAP) {
595 if(local_adv & ADVERTISE_PAUSE_ASYM) {
596 if (remote_adv & ADVERTISE_PAUSE_CAP) {
597 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
599 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
600 bp->flow_ctrl = FLOW_CTRL_RX;
604 if (remote_adv & ADVERTISE_PAUSE_CAP) {
605 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
609 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
610 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
611 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
613 bp->flow_ctrl = FLOW_CTRL_TX;
619 bnx2_5708s_linkup(struct bnx2 *bp)
624 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
625 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
626 case BCM5708S_1000X_STAT1_SPEED_10:
627 bp->line_speed = SPEED_10;
629 case BCM5708S_1000X_STAT1_SPEED_100:
630 bp->line_speed = SPEED_100;
632 case BCM5708S_1000X_STAT1_SPEED_1G:
633 bp->line_speed = SPEED_1000;
635 case BCM5708S_1000X_STAT1_SPEED_2G5:
636 bp->line_speed = SPEED_2500;
639 if (val & BCM5708S_1000X_STAT1_FD)
640 bp->duplex = DUPLEX_FULL;
642 bp->duplex = DUPLEX_HALF;
648 bnx2_5706s_linkup(struct bnx2 *bp)
650 u32 bmcr, local_adv, remote_adv, common;
653 bp->line_speed = SPEED_1000;
655 bnx2_read_phy(bp, MII_BMCR, &bmcr);
656 if (bmcr & BMCR_FULLDPLX) {
657 bp->duplex = DUPLEX_FULL;
660 bp->duplex = DUPLEX_HALF;
663 if (!(bmcr & BMCR_ANENABLE)) {
667 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
668 bnx2_read_phy(bp, MII_LPA, &remote_adv);
670 common = local_adv & remote_adv;
671 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
673 if (common & ADVERTISE_1000XFULL) {
674 bp->duplex = DUPLEX_FULL;
677 bp->duplex = DUPLEX_HALF;
685 bnx2_copper_linkup(struct bnx2 *bp)
689 bnx2_read_phy(bp, MII_BMCR, &bmcr);
690 if (bmcr & BMCR_ANENABLE) {
691 u32 local_adv, remote_adv, common;
693 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
694 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
696 common = local_adv & (remote_adv >> 2);
697 if (common & ADVERTISE_1000FULL) {
698 bp->line_speed = SPEED_1000;
699 bp->duplex = DUPLEX_FULL;
701 else if (common & ADVERTISE_1000HALF) {
702 bp->line_speed = SPEED_1000;
703 bp->duplex = DUPLEX_HALF;
706 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
707 bnx2_read_phy(bp, MII_LPA, &remote_adv);
709 common = local_adv & remote_adv;
710 if (common & ADVERTISE_100FULL) {
711 bp->line_speed = SPEED_100;
712 bp->duplex = DUPLEX_FULL;
714 else if (common & ADVERTISE_100HALF) {
715 bp->line_speed = SPEED_100;
716 bp->duplex = DUPLEX_HALF;
718 else if (common & ADVERTISE_10FULL) {
719 bp->line_speed = SPEED_10;
720 bp->duplex = DUPLEX_FULL;
722 else if (common & ADVERTISE_10HALF) {
723 bp->line_speed = SPEED_10;
724 bp->duplex = DUPLEX_HALF;
733 if (bmcr & BMCR_SPEED100) {
734 bp->line_speed = SPEED_100;
737 bp->line_speed = SPEED_10;
739 if (bmcr & BMCR_FULLDPLX) {
740 bp->duplex = DUPLEX_FULL;
743 bp->duplex = DUPLEX_HALF;
751 bnx2_set_mac_link(struct bnx2 *bp)
755 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
756 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
757 (bp->duplex == DUPLEX_HALF)) {
758 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
761 /* Configure the EMAC mode register. */
762 val = REG_RD(bp, BNX2_EMAC_MODE);
764 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
765 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
769 switch (bp->line_speed) {
771 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
772 val |= BNX2_EMAC_MODE_PORT_MII_10;
777 val |= BNX2_EMAC_MODE_PORT_MII;
780 val |= BNX2_EMAC_MODE_25G;
783 val |= BNX2_EMAC_MODE_PORT_GMII;
788 val |= BNX2_EMAC_MODE_PORT_GMII;
791 /* Set the MAC to operate in the appropriate duplex mode. */
792 if (bp->duplex == DUPLEX_HALF)
793 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
794 REG_WR(bp, BNX2_EMAC_MODE, val);
796 /* Enable/disable rx PAUSE. */
797 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
799 if (bp->flow_ctrl & FLOW_CTRL_RX)
800 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
801 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
803 /* Enable/disable tx PAUSE. */
804 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
805 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
807 if (bp->flow_ctrl & FLOW_CTRL_TX)
808 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
809 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
811 /* Acknowledge the interrupt. */
812 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
818 bnx2_set_link(struct bnx2 *bp)
823 if (bp->loopback == MAC_LOOPBACK) {
828 link_up = bp->link_up;
830 bnx2_read_phy(bp, MII_BMSR, &bmsr);
831 bnx2_read_phy(bp, MII_BMSR, &bmsr);
833 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
834 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
837 val = REG_RD(bp, BNX2_EMAC_STATUS);
838 if (val & BNX2_EMAC_STATUS_LINK)
839 bmsr |= BMSR_LSTATUS;
841 bmsr &= ~BMSR_LSTATUS;
844 if (bmsr & BMSR_LSTATUS) {
847 if (bp->phy_flags & PHY_SERDES_FLAG) {
848 if (CHIP_NUM(bp) == CHIP_NUM_5706)
849 bnx2_5706s_linkup(bp);
850 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
851 bnx2_5708s_linkup(bp);
854 bnx2_copper_linkup(bp);
856 bnx2_resolve_flow_ctrl(bp);
859 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
860 (bp->autoneg & AUTONEG_SPEED)) {
864 bnx2_read_phy(bp, MII_BMCR, &bmcr);
865 if (!(bmcr & BMCR_ANENABLE)) {
866 bnx2_write_phy(bp, MII_BMCR, bmcr |
870 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
874 if (bp->link_up != link_up) {
875 bnx2_report_link(bp);
878 bnx2_set_mac_link(bp);
884 bnx2_reset_phy(struct bnx2 *bp)
889 bnx2_write_phy(bp, MII_BMCR, BMCR_RESET);
891 #define PHY_RESET_MAX_WAIT 100
892 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
895 bnx2_read_phy(bp, MII_BMCR, &reg);
896 if (!(reg & BMCR_RESET)) {
901 if (i == PHY_RESET_MAX_WAIT) {
908 bnx2_phy_get_pause_adv(struct bnx2 *bp)
912 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
913 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
915 if (bp->phy_flags & PHY_SERDES_FLAG) {
916 adv = ADVERTISE_1000XPAUSE;
919 adv = ADVERTISE_PAUSE_CAP;
922 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
923 if (bp->phy_flags & PHY_SERDES_FLAG) {
924 adv = ADVERTISE_1000XPSE_ASYM;
927 adv = ADVERTISE_PAUSE_ASYM;
930 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
931 if (bp->phy_flags & PHY_SERDES_FLAG) {
932 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
935 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
942 bnx2_setup_serdes_phy(struct bnx2 *bp)
947 if (!(bp->autoneg & AUTONEG_SPEED)) {
949 int force_link_down = 0;
951 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
952 bnx2_read_phy(bp, BCM5708S_UP1, &up1);
953 if (up1 & BCM5708S_UP1_2G5) {
954 up1 &= ~BCM5708S_UP1_2G5;
955 bnx2_write_phy(bp, BCM5708S_UP1, up1);
960 bnx2_read_phy(bp, MII_ADVERTISE, &adv);
961 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
963 bnx2_read_phy(bp, MII_BMCR, &bmcr);
964 new_bmcr = bmcr & ~BMCR_ANENABLE;
965 new_bmcr |= BMCR_SPEED1000;
966 if (bp->req_duplex == DUPLEX_FULL) {
967 adv |= ADVERTISE_1000XFULL;
968 new_bmcr |= BMCR_FULLDPLX;
971 adv |= ADVERTISE_1000XHALF;
972 new_bmcr &= ~BMCR_FULLDPLX;
974 if ((new_bmcr != bmcr) || (force_link_down)) {
975 /* Force a link down visible on the other side */
977 bnx2_write_phy(bp, MII_ADVERTISE, adv &
978 ~(ADVERTISE_1000XFULL |
979 ADVERTISE_1000XHALF));
980 bnx2_write_phy(bp, MII_BMCR, bmcr |
981 BMCR_ANRESTART | BMCR_ANENABLE);
984 netif_carrier_off(bp->dev);
985 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
987 bnx2_write_phy(bp, MII_ADVERTISE, adv);
988 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
993 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
994 bnx2_read_phy(bp, BCM5708S_UP1, &up1);
995 up1 |= BCM5708S_UP1_2G5;
996 bnx2_write_phy(bp, BCM5708S_UP1, up1);
999 if (bp->advertising & ADVERTISED_1000baseT_Full)
1000 new_adv |= ADVERTISE_1000XFULL;
1002 new_adv |= bnx2_phy_get_pause_adv(bp);
1004 bnx2_read_phy(bp, MII_ADVERTISE, &adv);
1005 bnx2_read_phy(bp, MII_BMCR, &bmcr);
1007 bp->serdes_an_pending = 0;
1008 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1009 /* Force a link down visible on the other side */
1013 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
1014 for (i = 0; i < 110; i++) {
1019 bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
1020 bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
1022 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
1023 /* Speed up link-up time when the link partner
1024 * does not autonegotiate, which is very common
1025 * in blade servers. Some blade servers use
1026 * IPMI for keyboard input and it's important
1027 * to minimize link disruptions. Autoneg. involves
1028 * exchanging base pages plus 3 next pages and
1029 * normally completes in about 120 msec.
1031 bp->current_interval = SERDES_AN_TIMEOUT;
1032 bp->serdes_an_pending = 1;
1033 mod_timer(&bp->timer, jiffies + bp->current_interval);
1040 #define ETHTOOL_ALL_FIBRE_SPEED \
1041 (ADVERTISED_1000baseT_Full)
1043 #define ETHTOOL_ALL_COPPER_SPEED \
1044 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1045 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1046 ADVERTISED_1000baseT_Full)
1048 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1049 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1051 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1054 bnx2_setup_copper_phy(struct bnx2 *bp)
1059 bnx2_read_phy(bp, MII_BMCR, &bmcr);
1061 if (bp->autoneg & AUTONEG_SPEED) {
1062 u32 adv_reg, adv1000_reg;
1063 u32 new_adv_reg = 0;
1064 u32 new_adv1000_reg = 0;
1066 bnx2_read_phy(bp, MII_ADVERTISE, &adv_reg);
1067 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1068 ADVERTISE_PAUSE_ASYM);
1070 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1071 adv1000_reg &= PHY_ALL_1000_SPEED;
1073 if (bp->advertising & ADVERTISED_10baseT_Half)
1074 new_adv_reg |= ADVERTISE_10HALF;
1075 if (bp->advertising & ADVERTISED_10baseT_Full)
1076 new_adv_reg |= ADVERTISE_10FULL;
1077 if (bp->advertising & ADVERTISED_100baseT_Half)
1078 new_adv_reg |= ADVERTISE_100HALF;
1079 if (bp->advertising & ADVERTISED_100baseT_Full)
1080 new_adv_reg |= ADVERTISE_100FULL;
1081 if (bp->advertising & ADVERTISED_1000baseT_Full)
1082 new_adv1000_reg |= ADVERTISE_1000FULL;
1084 new_adv_reg |= ADVERTISE_CSMA;
1086 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1088 if ((adv1000_reg != new_adv1000_reg) ||
1089 (adv_reg != new_adv_reg) ||
1090 ((bmcr & BMCR_ANENABLE) == 0)) {
1092 bnx2_write_phy(bp, MII_ADVERTISE, new_adv_reg);
1093 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
1094 bnx2_write_phy(bp, MII_BMCR, BMCR_ANRESTART |
1097 else if (bp->link_up) {
1098 /* Flow ctrl may have changed from auto to forced */
1099 /* or vice-versa. */
1101 bnx2_resolve_flow_ctrl(bp);
1102 bnx2_set_mac_link(bp);
1108 if (bp->req_line_speed == SPEED_100) {
1109 new_bmcr |= BMCR_SPEED100;
1111 if (bp->req_duplex == DUPLEX_FULL) {
1112 new_bmcr |= BMCR_FULLDPLX;
1114 if (new_bmcr != bmcr) {
1118 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1119 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1121 if (bmsr & BMSR_LSTATUS) {
1122 /* Force link down */
1123 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
1126 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1127 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1129 } while ((bmsr & BMSR_LSTATUS) && (i < 620));
1132 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
1134 /* Normally, the new speed is setup after the link has
1135 * gone down and up again. In some cases, link will not go
1136 * down so we need to set up the new speed here.
1138 if (bmsr & BMSR_LSTATUS) {
1139 bp->line_speed = bp->req_line_speed;
1140 bp->duplex = bp->req_duplex;
1141 bnx2_resolve_flow_ctrl(bp);
1142 bnx2_set_mac_link(bp);
1149 bnx2_setup_phy(struct bnx2 *bp)
1151 if (bp->loopback == MAC_LOOPBACK)
1154 if (bp->phy_flags & PHY_SERDES_FLAG) {
1155 return (bnx2_setup_serdes_phy(bp));
1158 return (bnx2_setup_copper_phy(bp));
1163 bnx2_init_5708s_phy(struct bnx2 *bp)
1167 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1168 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1169 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1171 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1172 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1173 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1175 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1176 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1177 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1179 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1180 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1181 val |= BCM5708S_UP1_2G5;
1182 bnx2_write_phy(bp, BCM5708S_UP1, val);
1185 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
1186 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1187 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
1188 /* increase tx signal amplitude */
1189 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1190 BCM5708S_BLK_ADDR_TX_MISC);
1191 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1192 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1193 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1194 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1197 val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
1198 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1203 is_backplane = REG_RD_IND(bp, bp->shmem_base +
1204 BNX2_SHARED_HW_CFG_CONFIG);
1205 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1206 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1207 BCM5708S_BLK_ADDR_TX_MISC);
1208 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1209 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1210 BCM5708S_BLK_ADDR_DIG);
1217 bnx2_init_5706s_phy(struct bnx2 *bp)
1219 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1221 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
1222 REG_WR(bp, BNX2_MISC_UNUSED0, 0x300);
1225 if (bp->dev->mtu > 1500) {
1228 /* Set extended packet length bit */
1229 bnx2_write_phy(bp, 0x18, 0x7);
1230 bnx2_read_phy(bp, 0x18, &val);
1231 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1233 bnx2_write_phy(bp, 0x1c, 0x6c00);
1234 bnx2_read_phy(bp, 0x1c, &val);
1235 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1240 bnx2_write_phy(bp, 0x18, 0x7);
1241 bnx2_read_phy(bp, 0x18, &val);
1242 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1244 bnx2_write_phy(bp, 0x1c, 0x6c00);
1245 bnx2_read_phy(bp, 0x1c, &val);
1246 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
1253 bnx2_init_copper_phy(struct bnx2 *bp)
1257 bp->phy_flags |= PHY_CRC_FIX_FLAG;
1259 if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1260 bnx2_write_phy(bp, 0x18, 0x0c00);
1261 bnx2_write_phy(bp, 0x17, 0x000a);
1262 bnx2_write_phy(bp, 0x15, 0x310b);
1263 bnx2_write_phy(bp, 0x17, 0x201f);
1264 bnx2_write_phy(bp, 0x15, 0x9506);
1265 bnx2_write_phy(bp, 0x17, 0x401f);
1266 bnx2_write_phy(bp, 0x15, 0x14e2);
1267 bnx2_write_phy(bp, 0x18, 0x0400);
1270 if (bp->dev->mtu > 1500) {
1271 /* Set extended packet length bit */
1272 bnx2_write_phy(bp, 0x18, 0x7);
1273 bnx2_read_phy(bp, 0x18, &val);
1274 bnx2_write_phy(bp, 0x18, val | 0x4000);
1276 bnx2_read_phy(bp, 0x10, &val);
1277 bnx2_write_phy(bp, 0x10, val | 0x1);
1280 bnx2_write_phy(bp, 0x18, 0x7);
1281 bnx2_read_phy(bp, 0x18, &val);
1282 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1284 bnx2_read_phy(bp, 0x10, &val);
1285 bnx2_write_phy(bp, 0x10, val & ~0x1);
1288 /* ethernet@wirespeed */
1289 bnx2_write_phy(bp, 0x18, 0x7007);
1290 bnx2_read_phy(bp, 0x18, &val);
1291 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
1297 bnx2_init_phy(struct bnx2 *bp)
1302 bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1303 bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1305 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1309 bnx2_read_phy(bp, MII_PHYSID1, &val);
1310 bp->phy_id = val << 16;
1311 bnx2_read_phy(bp, MII_PHYSID2, &val);
1312 bp->phy_id |= val & 0xffff;
1314 if (bp->phy_flags & PHY_SERDES_FLAG) {
1315 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1316 rc = bnx2_init_5706s_phy(bp);
1317 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1318 rc = bnx2_init_5708s_phy(bp);
1321 rc = bnx2_init_copper_phy(bp);
1330 bnx2_set_mac_loopback(struct bnx2 *bp)
1334 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1335 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1336 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1337 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1342 static int bnx2_test_link(struct bnx2 *);
1345 bnx2_set_phy_loopback(struct bnx2 *bp)
1350 spin_lock_bh(&bp->phy_lock);
1351 rc = bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
1353 spin_unlock_bh(&bp->phy_lock);
1357 for (i = 0; i < 10; i++) {
1358 if (bnx2_test_link(bp) == 0)
1363 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1364 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1365 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1366 BNX2_EMAC_MODE_25G);
1368 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
1369 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
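/* Driver/firmware mailbox handshake: the message (tagged with the
 * current sequence number) is written to the DRV_MB shared-memory
 * mailbox, and the FW_MB mailbox is then polled until the firmware
 * echoes the same sequence number back as an acknowledgement.
 */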
1375 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
1381 msg_data |= bp->fw_wr_seq;
1383 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
1385 /* wait for an acknowledgement. */
1386 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
1389 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
1391 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
1394 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
1397 /* If we timed out, inform the firmware that this is the case. */
1398 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
1400 printk(KERN_ERR PFX "fw sync timeout, reset code = "
1403 msg_data &= ~BNX2_DRV_MSG_CODE;
1404 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
1406 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
1411 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
1418 bnx2_init_context(struct bnx2 *bp)
1424 u32 vcid_addr, pcid_addr, offset;
1428 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
1431 vcid_addr = GET_PCID_ADDR(vcid);
1433 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
1438 pcid_addr = GET_PCID_ADDR(new_vcid);
1441 vcid_addr = GET_CID_ADDR(vcid);
1442 pcid_addr = vcid_addr;
1445 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
1446 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1448 /* Zero out the context. */
1449 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
1450 CTX_WR(bp, 0x00, offset, 0);
1453 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
1454 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1459 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
1465 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
1466 if (good_mbuf == NULL) {
1467 printk(KERN_ERR PFX "Failed to allocate memory in "
1468 "bnx2_alloc_bad_rbuf\n");
1472 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
1473 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
1477 /* Allocate a bunch of mbufs and save the good ones in an array. */
1478 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1479 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
1480 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
1482 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
1484 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
1486 /* The addresses with Bit 9 set are bad memory blocks. */
1487 if (!(val & (1 << 9))) {
1488 good_mbuf[good_mbuf_cnt] = (u16) val;
1492 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1495 /* Free the good ones back to the mbuf pool thus discarding
1496 * all the bad ones. */
1497 while (good_mbuf_cnt) {
1500 val = good_mbuf[good_mbuf_cnt];
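/* Re-encode the handle for the free command: the same value is placed in
 * both the upper and lower fields with bit 0 set (field layout assumed
 * from the allocation reply format above).
 */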
1501 val = (val << 9) | val | 1;
1503 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
1510 bnx2_set_mac_addr(struct bnx2 *bp)
1513 u8 *mac_addr = bp->dev->dev_addr;
1515 val = (mac_addr[0] << 8) | mac_addr[1];
1517 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1519 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1520 (mac_addr[4] << 8) | mac_addr[5];
1522 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
1526 bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
1528 struct sk_buff *skb;
1529 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
1531 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
1532 unsigned long align;
1534 skb = dev_alloc_skb(bp->rx_buf_size);
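/* The controller is assumed to want 8-byte aligned receive buffers, so
 * bump skb->data up to the next 8-byte boundary if the allocation did
 * not land on one.
 */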
1539 if (unlikely((align = (unsigned long) skb->data & 0x7))) {
1540 skb_reserve(skb, 8 - align);
1544 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
1545 PCI_DMA_FROMDEVICE);
1548 pci_unmap_addr_set(rx_buf, mapping, mapping);
1550 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
1551 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
1553 bp->rx_prod_bseq += bp->rx_buf_use_size;
1559 bnx2_phy_int(struct bnx2 *bp)
1561 u32 new_link_state, old_link_state;
1563 new_link_state = bp->status_blk->status_attn_bits &
1564 STATUS_ATTN_BITS_LINK_STATE;
1565 old_link_state = bp->status_blk->status_attn_bits_ack &
1566 STATUS_ATTN_BITS_LINK_STATE;
1567 if (new_link_state != old_link_state) {
1568 if (new_link_state) {
1569 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
1570 STATUS_ATTN_BITS_LINK_STATE);
1573 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
1574 STATUS_ATTN_BITS_LINK_STATE);
1581 bnx2_tx_int(struct bnx2 *bp)
1583 struct status_block *sblk = bp->status_blk;
1584 u16 hw_cons, sw_cons, sw_ring_cons;
1587 hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
1588 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
1591 sw_cons = bp->tx_cons;
1593 while (sw_cons != hw_cons) {
1594 struct sw_bd *tx_buf;
1595 struct sk_buff *skb;
1598 sw_ring_cons = TX_RING_IDX(sw_cons);
1600 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
1603 /* partial BD completions possible with TSO packets */
1604 if (skb_shinfo(skb)->tso_size) {
1605 u16 last_idx, last_ring_idx;
1607 last_idx = sw_cons +
1608 skb_shinfo(skb)->nr_frags + 1;
1609 last_ring_idx = sw_ring_cons +
1610 skb_shinfo(skb)->nr_frags + 1;
1611 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
1614 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
1619 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
1620 skb_headlen(skb), PCI_DMA_TODEVICE);
1623 last = skb_shinfo(skb)->nr_frags;
1625 for (i = 0; i < last; i++) {
1626 sw_cons = NEXT_TX_BD(sw_cons);
1628 pci_unmap_page(bp->pdev,
1630 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
1632 skb_shinfo(skb)->frags[i].size,
1636 sw_cons = NEXT_TX_BD(sw_cons);
1638 tx_free_bd += last + 1;
1640 dev_kfree_skb_irq(skb);
1642 hw_cons = bp->hw_tx_cons =
1643 sblk->status_tx_quick_consumer_index0;
1645 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
1650 bp->tx_cons = sw_cons;
1652 if (unlikely(netif_queue_stopped(bp->dev))) {
1653 spin_lock(&bp->tx_lock);
1654 if ((netif_queue_stopped(bp->dev)) &&
1655 (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)) {
1657 netif_wake_queue(bp->dev);
1659 spin_unlock(&bp->tx_lock);
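/* Recycle an RX buffer that was not handed up the stack: the skb and its
 * DMA mapping are moved from the consumer slot to the producer slot, so
 * no new allocation or mapping is needed for the refill.
 */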
1664 bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
1667 struct sw_bd *cons_rx_buf, *prod_rx_buf;
1668 struct rx_bd *cons_bd, *prod_bd;
1670 cons_rx_buf = &bp->rx_buf_ring[cons];
1671 prod_rx_buf = &bp->rx_buf_ring[prod];
1673 pci_dma_sync_single_for_device(bp->pdev,
1674 pci_unmap_addr(cons_rx_buf, mapping),
1675 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1677 bp->rx_prod_bseq += bp->rx_buf_use_size;
1679 prod_rx_buf->skb = skb;
1684 pci_unmap_addr_set(prod_rx_buf, mapping,
1685 pci_unmap_addr(cons_rx_buf, mapping));
1687 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
1688 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
1689 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
1690 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
1694 bnx2_rx_int(struct bnx2 *bp, int budget)
1696 struct status_block *sblk = bp->status_blk;
1697 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
1698 struct l2_fhdr *rx_hdr;
1701 hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
1702 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
1705 sw_cons = bp->rx_cons;
1706 sw_prod = bp->rx_prod;
1708 /* Memory barrier necessary as speculative reads of the rx
1709 * buffer can be ahead of the index in the status block
1712 while (sw_cons != hw_cons) {
1715 struct sw_bd *rx_buf;
1716 struct sk_buff *skb;
1717 dma_addr_t dma_addr;
1719 sw_ring_cons = RX_RING_IDX(sw_cons);
1720 sw_ring_prod = RX_RING_IDX(sw_prod);
1722 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
1727 dma_addr = pci_unmap_addr(rx_buf, mapping);
1729 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
1730 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1732 rx_hdr = (struct l2_fhdr *) skb->data;
1733 len = rx_hdr->l2_fhdr_pkt_len - 4;
1735 if ((status = rx_hdr->l2_fhdr_status) &
1736 (L2_FHDR_ERRORS_BAD_CRC |
1737 L2_FHDR_ERRORS_PHY_DECODE |
1738 L2_FHDR_ERRORS_ALIGNMENT |
1739 L2_FHDR_ERRORS_TOO_SHORT |
1740 L2_FHDR_ERRORS_GIANT_FRAME)) {
1745 /* Since we don't have a jumbo ring, copy small packets
1748 if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
1749 struct sk_buff *new_skb;
1751 new_skb = dev_alloc_skb(len + 2);
1752 if (new_skb == NULL)
1756 memcpy(new_skb->data,
1757 skb->data + bp->rx_offset - 2,
1760 skb_reserve(new_skb, 2);
1761 skb_put(new_skb, len);
1762 new_skb->dev = bp->dev;
1764 bnx2_reuse_rx_skb(bp, skb,
1765 sw_ring_cons, sw_ring_prod);
1769 else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
1770 pci_unmap_single(bp->pdev, dma_addr,
1771 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
1773 skb_reserve(skb, bp->rx_offset);
1778 bnx2_reuse_rx_skb(bp, skb,
1779 sw_ring_cons, sw_ring_prod);
1783 skb->protocol = eth_type_trans(skb, bp->dev);
1785 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
1786 (htons(skb->protocol) != 0x8100)) {
1788 dev_kfree_skb_irq(skb);
1793 skb->ip_summed = CHECKSUM_NONE;
1795 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
1796 L2_FHDR_STATUS_UDP_DATAGRAM))) {
1798 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
1799 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
1800 skb->ip_summed = CHECKSUM_UNNECESSARY;
1804 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
1805 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1806 rx_hdr->l2_fhdr_vlan_tag);
1810 netif_receive_skb(skb);
1812 bp->dev->last_rx = jiffies;
1816 sw_cons = NEXT_RX_BD(sw_cons);
1817 sw_prod = NEXT_RX_BD(sw_prod);
1819 if ((rx_pkt == budget))
1822 /* Refresh hw_cons to see if there is new work */
1823 if (sw_cons == hw_cons) {
1824 hw_cons = bp->hw_rx_cons =
1825 sblk->status_rx_quick_consumer_index0;
1826 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
1831 bp->rx_cons = sw_cons;
1832 bp->rx_prod = sw_prod;
1834 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
1836 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
1844 /* MSI ISR - The only difference between this and the INTx ISR
1845 * is that the MSI interrupt is always serviced.
1848 bnx2_msi(int irq, void *dev_instance, struct pt_regs *regs)
1850 struct net_device *dev = dev_instance;
1851 struct bnx2 *bp = netdev_priv(dev);
1853 prefetch(bp->status_blk);
1854 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1855 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
1856 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
1858 /* Return here if interrupt is disabled. */
1859 if (unlikely(atomic_read(&bp->intr_sem) != 0))
1862 netif_rx_schedule(dev);
1868 bnx2_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
1870 struct net_device *dev = dev_instance;
1871 struct bnx2 *bp = netdev_priv(dev);
1873 /* When using INTx, it is possible for the interrupt to arrive
1874 * at the CPU before the status block write that preceded the
1875 * interrupt has been posted. Reading a register will flush the status block.
1876 * When using MSI, the MSI message will always complete after
1877 * the status block write.
1879 if ((bp->status_blk->status_idx == bp->last_status_idx) &&
1880 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
1881 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
1884 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1885 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
1886 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
1888 /* Return here if interrupt is shared and is disabled. */
1889 if (unlikely(atomic_read(&bp->intr_sem) != 0))
1892 netif_rx_schedule(dev);
1898 bnx2_has_work(struct bnx2 *bp)
1900 struct status_block *sblk = bp->status_blk;
1902 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
1903 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
1906 if (((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=
1914 bnx2_poll(struct net_device *dev, int *budget)
1916 struct bnx2 *bp = netdev_priv(dev);
1918 if ((bp->status_blk->status_attn_bits &
1919 STATUS_ATTN_BITS_LINK_STATE) !=
1920 (bp->status_blk->status_attn_bits_ack &
1921 STATUS_ATTN_BITS_LINK_STATE)) {
1923 spin_lock(&bp->phy_lock);
1925 spin_unlock(&bp->phy_lock);
1927 /* This is needed to take care of transient status
1928 * during link changes.
1930 REG_WR(bp, BNX2_HC_COMMAND,
1931 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
1932 REG_RD(bp, BNX2_HC_COMMAND);
1935 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
1938 if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
1939 int orig_budget = *budget;
1942 if (orig_budget > dev->quota)
1943 orig_budget = dev->quota;
1945 work_done = bnx2_rx_int(bp, orig_budget);
1946 *budget -= work_done;
1947 dev->quota -= work_done;
1950 bp->last_status_idx = bp->status_blk->status_idx;
1953 if (!bnx2_has_work(bp)) {
1954 netif_rx_complete(dev);
1955 if (likely(bp->flags & USING_MSI_FLAG)) {
1956 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1957 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
1958 bp->last_status_idx);
1961 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1962 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
1963 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
1964 bp->last_status_idx);
1966 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1967 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
1968 bp->last_status_idx);
1975 /* Called with rtnl_lock from vlan functions and also dev->xmit_lock
1976 * from set_multicast.
1979 bnx2_set_rx_mode(struct net_device *dev)
1981 struct bnx2 *bp = netdev_priv(dev);
1982 u32 rx_mode, sort_mode;
1985 spin_lock_bh(&bp->phy_lock);
1987 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
1988 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
1989 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
1991 if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
1992 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
1994 if (!(bp->flags & ASF_ENABLE_FLAG))
1995 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
1997 if (dev->flags & IFF_PROMISC) {
1998 /* Promiscuous mode. */
1999 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
2000 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN;
2002 else if (dev->flags & IFF_ALLMULTI) {
2003 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2004 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2007 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
2010 /* Accept one or more multicast(s). */
2011 struct dev_mc_list *mclist;
2012 u32 mc_filter[NUM_MC_HASH_REGISTERS];
2017 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
2019 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2020 i++, mclist = mclist->next) {
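/* Hash each multicast address with a CRC; bits derived from the CRC
 * (derivation elided here) select one of the NUM_MC_HASH_REGISTERS
 * 32-bit hash registers and a bit within it.
 */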
2022 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
2024 regidx = (bit & 0xe0) >> 5;
2026 mc_filter[regidx] |= (1 << bit);
2029 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2030 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2034 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
2037 if (rx_mode != bp->rx_mode) {
2038 bp->rx_mode = rx_mode;
2039 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
2042 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2043 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2044 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
2046 spin_unlock_bh(&bp->phy_lock);
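/* Load RV2P processor firmware: each 8-byte instruction is written as a
 * high/low word pair into the INSTR registers and committed to the
 * selected processor (PROC1 or PROC2) with an ADDR_CMD write; the
 * processor is then held in reset until it is un-stalled later.
 */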
2050 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2057 for (i = 0; i < rv2p_code_len; i += 8) {
2058 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, *rv2p_code);
2060 REG_WR(bp, BNX2_RV2P_INSTR_LOW, *rv2p_code);
2063 if (rv2p_proc == RV2P_PROC1) {
2064 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2065 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2068 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2069 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2073 /* Reset the processor, un-stall is done later. */
2074 if (rv2p_proc == RV2P_PROC1) {
2075 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2078 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
2083 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2089 val = REG_RD_IND(bp, cpu_reg->mode);
2090 val |= cpu_reg->mode_value_halt;
2091 REG_WR_IND(bp, cpu_reg->mode, val);
2092 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2094 /* Load the Text area. */
2095 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2099 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2100 REG_WR_IND(bp, offset, fw->text[j]);
2104 /* Load the Data area. */
2105 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2109 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2110 REG_WR_IND(bp, offset, fw->data[j]);
2114 /* Load the SBSS area. */
2115 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2119 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2120 REG_WR_IND(bp, offset, fw->sbss[j]);
2124 /* Load the BSS area. */
2125 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2129 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2130 REG_WR_IND(bp, offset, fw->bss[j]);
2134 /* Load the Read-Only area. */
2135 offset = cpu_reg->spad_base +
2136 (fw->rodata_addr - cpu_reg->mips_view_base);
2140 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2141 REG_WR_IND(bp, offset, fw->rodata[j]);
2145 /* Clear the pre-fetch instruction. */
2146 REG_WR_IND(bp, cpu_reg->inst, 0);
2147 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2149 /* Start the CPU. */
2150 val = REG_RD_IND(bp, cpu_reg->mode);
2151 val &= ~cpu_reg->mode_value_halt;
2152 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2153 REG_WR_IND(bp, cpu_reg->mode, val);
2157 bnx2_init_cpus(struct bnx2 *bp)
2159 struct cpu_reg cpu_reg;
2162 /* Initialize the RV2P processor. */
2163 load_rv2p_fw(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), RV2P_PROC1);
2164 load_rv2p_fw(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), RV2P_PROC2);
2166 /* Initialize the RX Processor. */
2167 cpu_reg.mode = BNX2_RXP_CPU_MODE;
2168 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
2169 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
2170 cpu_reg.state = BNX2_RXP_CPU_STATE;
2171 cpu_reg.state_value_clear = 0xffffff;
2172 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
2173 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
2174 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
2175 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
2176 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
2177 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
2178 cpu_reg.mips_view_base = 0x8000000;
2180 fw.ver_major = bnx2_RXP_b06FwReleaseMajor;
2181 fw.ver_minor = bnx2_RXP_b06FwReleaseMinor;
2182 fw.ver_fix = bnx2_RXP_b06FwReleaseFix;
2183 fw.start_addr = bnx2_RXP_b06FwStartAddr;
2185 fw.text_addr = bnx2_RXP_b06FwTextAddr;
2186 fw.text_len = bnx2_RXP_b06FwTextLen;
2188 fw.text = bnx2_RXP_b06FwText;
2190 fw.data_addr = bnx2_RXP_b06FwDataAddr;
2191 fw.data_len = bnx2_RXP_b06FwDataLen;
2193 fw.data = bnx2_RXP_b06FwData;
2195 fw.sbss_addr = bnx2_RXP_b06FwSbssAddr;
2196 fw.sbss_len = bnx2_RXP_b06FwSbssLen;
2198 fw.sbss = bnx2_RXP_b06FwSbss;
2200 fw.bss_addr = bnx2_RXP_b06FwBssAddr;
2201 fw.bss_len = bnx2_RXP_b06FwBssLen;
2203 fw.bss = bnx2_RXP_b06FwBss;
2205 fw.rodata_addr = bnx2_RXP_b06FwRodataAddr;
2206 fw.rodata_len = bnx2_RXP_b06FwRodataLen;
2207 fw.rodata_index = 0;
2208 fw.rodata = bnx2_RXP_b06FwRodata;
2210 load_cpu_fw(bp, &cpu_reg, &fw);
2212 /* Initialize the TX Processor. */
2213 cpu_reg.mode = BNX2_TXP_CPU_MODE;
2214 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
2215 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
2216 cpu_reg.state = BNX2_TXP_CPU_STATE;
2217 cpu_reg.state_value_clear = 0xffffff;
2218 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
2219 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
2220 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
2221 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
2222 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
2223 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
2224 cpu_reg.mips_view_base = 0x8000000;
2226 fw.ver_major = bnx2_TXP_b06FwReleaseMajor;
2227 fw.ver_minor = bnx2_TXP_b06FwReleaseMinor;
2228 fw.ver_fix = bnx2_TXP_b06FwReleaseFix;
2229 fw.start_addr = bnx2_TXP_b06FwStartAddr;
2231 fw.text_addr = bnx2_TXP_b06FwTextAddr;
2232 fw.text_len = bnx2_TXP_b06FwTextLen;
2234 fw.text = bnx2_TXP_b06FwText;
2236 fw.data_addr = bnx2_TXP_b06FwDataAddr;
2237 fw.data_len = bnx2_TXP_b06FwDataLen;
2239 fw.data = bnx2_TXP_b06FwData;
2241 fw.sbss_addr = bnx2_TXP_b06FwSbssAddr;
2242 fw.sbss_len = bnx2_TXP_b06FwSbssLen;
2244 fw.sbss = bnx2_TXP_b06FwSbss;
2246 fw.bss_addr = bnx2_TXP_b06FwBssAddr;
2247 fw.bss_len = bnx2_TXP_b06FwBssLen;
2249 fw.bss = bnx2_TXP_b06FwBss;
2251 fw.rodata_addr = bnx2_TXP_b06FwRodataAddr;
2252 fw.rodata_len = bnx2_TXP_b06FwRodataLen;
2253 fw.rodata_index = 0;
2254 fw.rodata = bnx2_TXP_b06FwRodata;
2256 load_cpu_fw(bp, &cpu_reg, &fw);
2258 /* Initialize the TX Patch-up Processor. */
2259 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
2260 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
2261 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
2262 cpu_reg.state = BNX2_TPAT_CPU_STATE;
2263 cpu_reg.state_value_clear = 0xffffff;
2264 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
2265 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
2266 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
2267 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
2268 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
2269 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
2270 cpu_reg.mips_view_base = 0x8000000;
2272 fw.ver_major = bnx2_TPAT_b06FwReleaseMajor;
2273 fw.ver_minor = bnx2_TPAT_b06FwReleaseMinor;
2274 fw.ver_fix = bnx2_TPAT_b06FwReleaseFix;
2275 fw.start_addr = bnx2_TPAT_b06FwStartAddr;
2277 fw.text_addr = bnx2_TPAT_b06FwTextAddr;
2278 fw.text_len = bnx2_TPAT_b06FwTextLen;
2280 fw.text = bnx2_TPAT_b06FwText;
2282 fw.data_addr = bnx2_TPAT_b06FwDataAddr;
2283 fw.data_len = bnx2_TPAT_b06FwDataLen;
2285 fw.data = bnx2_TPAT_b06FwData;
2287 fw.sbss_addr = bnx2_TPAT_b06FwSbssAddr;
2288 fw.sbss_len = bnx2_TPAT_b06FwSbssLen;
2290 fw.sbss = bnx2_TPAT_b06FwSbss;
2292 fw.bss_addr = bnx2_TPAT_b06FwBssAddr;
2293 fw.bss_len = bnx2_TPAT_b06FwBssLen;
2295 fw.bss = bnx2_TPAT_b06FwBss;
2297 fw.rodata_addr = bnx2_TPAT_b06FwRodataAddr;
2298 fw.rodata_len = bnx2_TPAT_b06FwRodataLen;
2299 fw.rodata_index = 0;
2300 fw.rodata = bnx2_TPAT_b06FwRodata;
2302 load_cpu_fw(bp, &cpu_reg, &fw);
2304 /* Initialize the Completion Processor. */
2305 cpu_reg.mode = BNX2_COM_CPU_MODE;
2306 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
2307 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
2308 cpu_reg.state = BNX2_COM_CPU_STATE;
2309 cpu_reg.state_value_clear = 0xffffff;
2310 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
2311 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
2312 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
2313 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
2314 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
2315 cpu_reg.spad_base = BNX2_COM_SCRATCH;
2316 cpu_reg.mips_view_base = 0x8000000;
2318 fw.ver_major = bnx2_COM_b06FwReleaseMajor;
2319 fw.ver_minor = bnx2_COM_b06FwReleaseMinor;
2320 fw.ver_fix = bnx2_COM_b06FwReleaseFix;
2321 fw.start_addr = bnx2_COM_b06FwStartAddr;
2323 fw.text_addr = bnx2_COM_b06FwTextAddr;
2324 fw.text_len = bnx2_COM_b06FwTextLen;
2326 fw.text = bnx2_COM_b06FwText;
2328 fw.data_addr = bnx2_COM_b06FwDataAddr;
2329 fw.data_len = bnx2_COM_b06FwDataLen;
2331 fw.data = bnx2_COM_b06FwData;
2333 fw.sbss_addr = bnx2_COM_b06FwSbssAddr;
2334 fw.sbss_len = bnx2_COM_b06FwSbssLen;
2336 fw.sbss = bnx2_COM_b06FwSbss;
2338 fw.bss_addr = bnx2_COM_b06FwBssAddr;
2339 fw.bss_len = bnx2_COM_b06FwBssLen;
2341 fw.bss = bnx2_COM_b06FwBss;
2343 fw.rodata_addr = bnx2_COM_b06FwRodataAddr;
2344 fw.rodata_len = bnx2_COM_b06FwRodataLen;
2345 fw.rodata_index = 0;
2346 fw.rodata = bnx2_COM_b06FwRodata;
2348 load_cpu_fw(bp, &cpu_reg, &fw);
2353 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
2357 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2363 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2364 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2365 PCI_PM_CTRL_PME_STATUS);
2367 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2368 /* delay required during transition out of D3hot */
2371 val = REG_RD(bp, BNX2_EMAC_MODE);
2372 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
2373 val &= ~BNX2_EMAC_MODE_MPKT;
2374 REG_WR(bp, BNX2_EMAC_MODE, val);
2376 val = REG_RD(bp, BNX2_RPM_CONFIG);
2377 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2378 REG_WR(bp, BNX2_RPM_CONFIG, val);
2389 autoneg = bp->autoneg;
2390 advertising = bp->advertising;
2392 bp->autoneg = AUTONEG_SPEED;
2393 bp->advertising = ADVERTISED_10baseT_Half |
2394 ADVERTISED_10baseT_Full |
2395 ADVERTISED_100baseT_Half |
2396 ADVERTISED_100baseT_Full |
2399 bnx2_setup_copper_phy(bp);
2401 bp->autoneg = autoneg;
2402 bp->advertising = advertising;
2404 bnx2_set_mac_addr(bp);
2406 val = REG_RD(bp, BNX2_EMAC_MODE);
2408 /* Enable port mode. */
2409 val &= ~BNX2_EMAC_MODE_PORT;
2410 val |= BNX2_EMAC_MODE_PORT_MII |
2411 BNX2_EMAC_MODE_MPKT_RCVD |
2412 BNX2_EMAC_MODE_ACPI_RCVD |
2413 BNX2_EMAC_MODE_MPKT;
2415 REG_WR(bp, BNX2_EMAC_MODE, val);
2417 /* receive all multicast */
2418 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2419 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2422 REG_WR(bp, BNX2_EMAC_RX_MODE,
2423 BNX2_EMAC_RX_MODE_SORT_MODE);
2425 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
2426 BNX2_RPM_SORT_USER0_MC_EN;
2427 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2428 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
2429 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
2430 BNX2_RPM_SORT_USER0_ENA);
2432 /* Need to enable EMAC and RPM for WOL. */
2433 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2434 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
2435 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
2436 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
2438 val = REG_RD(bp, BNX2_RPM_CONFIG);
2439 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2440 REG_WR(bp, BNX2_RPM_CONFIG, val);
2442 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
2445 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
2448 if (!(bp->flags & NO_WOL_FLAG))
2449 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
2451 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2452 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
2453 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
2462 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2464 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2467 /* No more memory access after this point until
2468 * device is brought back to D0.
2480 bnx2_acquire_nvram_lock(struct bnx2 *bp)
2485 /* Request access to the flash interface. */
2486 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2487 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2488 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2489 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2495 if (j >= NVRAM_TIMEOUT_COUNT)
2502 bnx2_release_nvram_lock(struct bnx2 *bp)
2507 /* Relinquish nvram interface. */
2508 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2510 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2511 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2512 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2518 if (j >= NVRAM_TIMEOUT_COUNT)
2526 bnx2_enable_nvram_write(struct bnx2 *bp)
2530 val = REG_RD(bp, BNX2_MISC_CFG);
2531 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
2533 if (!bp->flash_info->buffered) {
2536 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2537 REG_WR(bp, BNX2_NVM_COMMAND,
2538 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
2540 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2543 val = REG_RD(bp, BNX2_NVM_COMMAND);
2544 if (val & BNX2_NVM_COMMAND_DONE)
2548 if (j >= NVRAM_TIMEOUT_COUNT)
2555 bnx2_disable_nvram_write(struct bnx2 *bp)
2559 val = REG_RD(bp, BNX2_MISC_CFG);
2560 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
2565 bnx2_enable_nvram_access(struct bnx2 *bp)
2569 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2570 /* Enable both bits, even on read. */
2571 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2572 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
2576 bnx2_disable_nvram_access(struct bnx2 *bp)
2580 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2581 /* Disable both bits, even after read. */
2582 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2583 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
2584 BNX2_NVM_ACCESS_ENABLE_WR_EN));
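/* Erase the flash page containing the given offset.  Buffered flash
 * parts need no explicit erase, so nothing is issued for them.
 */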
2588 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
2593 if (bp->flash_info->buffered)
2594 /* Buffered flash, no erase needed */
2597 /* Build an erase command */
2598 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
2599 BNX2_NVM_COMMAND_DOIT;
2601 /* Need to clear DONE bit separately. */
2602 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2604 /* Address of the NVRAM page to erase. */
2605 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2607 /* Issue an erase command. */
2608 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2610 /* Wait for completion. */
2611 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2616 val = REG_RD(bp, BNX2_NVM_COMMAND);
2617 if (val & BNX2_NVM_COMMAND_DONE)
2621 if (j >= NVRAM_TIMEOUT_COUNT)
2628 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
2633 /* Build the command word. */
2634 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
2636 /* Convert the linear offset into a page/byte address for buffered flash. */
2637 if (bp->flash_info->buffered) {
2638 offset = ((offset / bp->flash_info->page_size) <<
2639 bp->flash_info->page_bits) +
2640 (offset % bp->flash_info->page_size);
2643 /* Need to clear DONE bit separately. */
2644 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2646 /* Address of the NVRAM to read from. */
2647 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2649 /* Issue a read command. */
2650 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2652 /* Wait for completion. */
2653 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2658 val = REG_RD(bp, BNX2_NVM_COMMAND);
2659 if (val & BNX2_NVM_COMMAND_DONE) {
2660 val = REG_RD(bp, BNX2_NVM_READ);
2662 val = be32_to_cpu(val);
2663 memcpy(ret_val, &val, 4);
2667 if (j >= NVRAM_TIMEOUT_COUNT)
2675 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
2680 /* Build the command word. */
2681 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
2683 /* Convert the linear offset into a page/byte address for buffered flash. */
2684 if (bp->flash_info->buffered) {
2685 offset = ((offset / bp->flash_info->page_size) <<
2686 bp->flash_info->page_bits) +
2687 (offset % bp->flash_info->page_size);
2690 /* Need to clear DONE bit separately. */
2691 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2693 memcpy(&val32, val, 4);
2694 val32 = cpu_to_be32(val32);
2696 /* Write the data. */
2697 REG_WR(bp, BNX2_NVM_WRITE, val32);
2699 /* Address of the NVRAM to write to. */
2700 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2702 /* Issue the write command. */
2703 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2705 /* Wait for completion. */
2706 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2709 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
2712 if (j >= NVRAM_TIMEOUT_COUNT)
2719 bnx2_init_nvram(struct bnx2 *bp)
2722 int j, entry_count, rc;
2723 struct flash_spec *flash;
2725 /* Determine the selected interface. */
2726 val = REG_RD(bp, BNX2_NVM_CFG1);
2728 entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
2731 if (val & 0x40000000) {
2733 /* Flash interface has been reconfigured */
2734 for (j = 0, flash = &flash_table[0]; j < entry_count;
2736 if ((val & FLASH_BACKUP_STRAP_MASK) ==
2737 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
2738 bp->flash_info = flash;
2745 /* Not yet reconfigured */
2747 if (val & (1 << 23))
2748 mask = FLASH_BACKUP_STRAP_MASK;
2750 mask = FLASH_STRAP_MASK;
2752 for (j = 0, flash = &flash_table[0]; j < entry_count;
2755 if ((val & mask) == (flash->strapping & mask)) {
2756 bp->flash_info = flash;
2758 /* Request access to the flash interface. */
2759 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2762 /* Enable access to flash interface */
2763 bnx2_enable_nvram_access(bp);
2765 /* Reconfigure the flash interface */
2766 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
2767 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
2768 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
2769 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
2771 /* Disable access to flash interface */
2772 bnx2_disable_nvram_access(bp);
2773 bnx2_release_nvram_lock(bp);
2778 } /* if (val & 0x40000000) */
2780 if (j == entry_count) {
2781 bp->flash_info = NULL;
2782 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
2786 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
2787 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
2789 bp->flash_size = val;
2791 bp->flash_size = bp->flash_info->total_size;
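/* Read an arbitrary byte range from NVRAM.  Accesses are done a dword
 * at a time; unaligned leading and trailing bytes are handled by
 * reading whole dwords and copying out only the requested portion.
 */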
2797 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
2801 u32 cmd_flags, offset32, len32, extra;
2806 /* Request access to the flash interface. */
2807 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2810 /* Enable access to flash interface */
2811 bnx2_enable_nvram_access(bp);
2824 pre_len = 4 - (offset & 3);
2826 if (pre_len >= len32) {
2828 cmd_flags = BNX2_NVM_COMMAND_FIRST |
2829 BNX2_NVM_COMMAND_LAST;
2832 cmd_flags = BNX2_NVM_COMMAND_FIRST;
2835 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
2840 memcpy(ret_buf, buf + (offset & 3), pre_len);
2847 extra = 4 - (len32 & 3);
2848 len32 = (len32 + 4) & ~3;
2855 cmd_flags = BNX2_NVM_COMMAND_LAST;
2857 cmd_flags = BNX2_NVM_COMMAND_FIRST |
2858 BNX2_NVM_COMMAND_LAST;
2860 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
2862 memcpy(ret_buf, buf, 4 - extra);
2864 else if (len32 > 0) {
2867 /* Read the first dword. */
2871 cmd_flags = BNX2_NVM_COMMAND_FIRST;
2873 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
2875 /* Advance to the next dword. */
2880 while (len32 > 4 && rc == 0) {
2881 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
2883 /* Advance to the next dword. */
2892 cmd_flags = BNX2_NVM_COMMAND_LAST;
2893 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
2895 memcpy(ret_buf, buf, 4 - extra);
2898 /* Disable access to flash interface */
2899 bnx2_disable_nvram_access(bp);
2901 bnx2_release_nvram_lock(bp);
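/* Write an arbitrary byte range to NVRAM.  Unaligned edges are first
 * read back so whole dwords can be written.  Non-buffered flash is
 * handled one page at a time: the page is read into a local buffer,
 * erased, and then rewritten with the new data merged in.
 */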
2907 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
2910 u32 written, offset32, len32;
2911 u8 *buf, start[4], end[4];
2913 int align_start, align_end;
2918 align_start = align_end = 0;
2920 if ((align_start = (offset32 & 3))) {
2922 len32 += align_start;
2923 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
2928 if ((len32 > 4) || !align_start) {
2929 align_end = 4 - (len32 & 3);
2931 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4,
2938 if (align_start || align_end) {
2939 buf = kmalloc(len32, GFP_KERNEL);
2943 memcpy(buf, start, 4);
2946 memcpy(buf + len32 - 4, end, 4);
2948 memcpy(buf + align_start, data_buf, buf_size);
2952 while ((written < len32) && (rc == 0)) {
2953 u32 page_start, page_end, data_start, data_end;
2954 u32 addr, cmd_flags;
2956 u8 flash_buffer[264];
2958 /* Find the page_start addr */
2959 page_start = offset32 + written;
2960 page_start -= (page_start % bp->flash_info->page_size);
2961 /* Find the page_end addr */
2962 page_end = page_start + bp->flash_info->page_size;
2963 /* Find the data_start addr */
2964 data_start = (written == 0) ? offset32 : page_start;
2965 /* Find the data_end addr */
2966 data_end = (page_end > offset32 + len32) ?
2967 (offset32 + len32) : page_end;
2969 /* Request access to the flash interface. */
2970 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2971 goto nvram_write_end;
2973 /* Enable access to flash interface */
2974 bnx2_enable_nvram_access(bp);
2976 cmd_flags = BNX2_NVM_COMMAND_FIRST;
2977 if (bp->flash_info->buffered == 0) {
2980 /* Read the whole page into the buffer
2981 * (non-buffered flash only) */
2982 for (j = 0; j < bp->flash_info->page_size; j += 4) {
2983 if (j == (bp->flash_info->page_size - 4)) {
2984 cmd_flags |= BNX2_NVM_COMMAND_LAST;
2986 rc = bnx2_nvram_read_dword(bp,
2992 goto nvram_write_end;
2998 /* Enable writes to flash interface (unlock write-protect) */
2999 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3000 goto nvram_write_end;
3002 /* Erase the page */
3003 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3004 goto nvram_write_end;
3006 /* Re-enable writes for the actual write */
3007 bnx2_enable_nvram_write(bp);
3009 /* Loop to write back the buffer data from page_start to data_start */
3012 if (bp->flash_info->buffered == 0) {
3013 for (addr = page_start; addr < data_start;
3014 addr += 4, i += 4) {
3016 rc = bnx2_nvram_write_dword(bp, addr,
3017 &flash_buffer[i], cmd_flags);
3020 goto nvram_write_end;
3026 /* Loop to write the new data from data_start to data_end */
3027 for (addr = data_start; addr < data_end; addr += 4, i++) {
3028 if ((addr == page_end - 4) ||
3029 ((bp->flash_info->buffered) &&
3030 (addr == data_end - 4))) {
3032 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3034 rc = bnx2_nvram_write_dword(bp, addr, buf,
3038 goto nvram_write_end;
3044 /* Loop to write back the buffer data from data_end to page_end */
3046 if (bp->flash_info->buffered == 0) {
3047 for (addr = data_end; addr < page_end;
3048 addr += 4, i += 4) {
3050 if (addr == page_end-4) {
3051 cmd_flags = BNX2_NVM_COMMAND_LAST;
3053 rc = bnx2_nvram_write_dword(bp, addr,
3054 &flash_buffer[i], cmd_flags);
3057 goto nvram_write_end;
3063 /* Disable writes to flash interface (lock write-protect) */
3064 bnx2_disable_nvram_write(bp);
3066 /* Disable access to flash interface */
3067 bnx2_disable_nvram_access(bp);
3068 bnx2_release_nvram_lock(bp);
3070 /* Increment written */
3071 written += data_end - data_start;
3075 if (align_start || align_end)
3081 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3086 /* Wait for the current PCI transaction to complete before
3087 * issuing a reset. */
3088 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3089 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3090 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3091 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3092 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3093 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3096 /* Wait for the firmware to tell us it is ok to issue a reset. */
3097 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3099 /* Deposit a driver reset signature so the firmware knows that
3100 * this is a soft reset. */
3101 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
3102 BNX2_DRV_RESET_SIGNATURE_MAGIC);
3104 /* Do a dummy read to force the chip to complete all current transactions
3105 * before we issue a reset. */
3106 val = REG_RD(bp, BNX2_MISC_ID);
3108 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3109 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3110 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3113 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3115 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3116 (CHIP_ID(bp) == CHIP_ID_5706_A1))
3119 /* Reset takes approximately 30 usec */
3120 for (i = 0; i < 10; i++) {
3121 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3122 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3123 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
3129 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3130 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3131 printk(KERN_ERR PFX "Chip reset did not complete\n");
3135 /* Make sure byte swapping is properly configured. */
3136 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3137 if (val != 0x01020304) {
3138 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3142 /* Wait for the firmware to finish its initialization. */
3143 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3147 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3148 /* Adjust the voltage regulator two steps lower. The default
3149 * of this register is 0x0000000e. */
3150 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3152 /* Remove bad rbuf memory from the free pool. */
3153 rc = bnx2_alloc_bad_rbuf(bp);
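/* Program the chip after reset: DMA byte/word swapping, context
 * memory, MAC address, backoff seed, MTU, host coalescing parameters,
 * and the status and statistics block addresses, then enable the
 * remaining blocks after the firmware handshake.
 */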
3160 bnx2_init_chip(struct bnx2 *bp)
3165 /* Make sure the interrupt is not active. */
3166 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3168 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
3169 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
3171 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
3173 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
3174 DMA_READ_CHANS << 12 |
3175 DMA_WRITE_CHANS << 16;
3177 val |= (0x2 << 20) | (1 << 11);
3179 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
3182 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
3183 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
3184 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
3186 REG_WR(bp, BNX2_DMA_CONFIG, val);
3188 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3189 val = REG_RD(bp, BNX2_TDMA_CONFIG);
3190 val |= BNX2_TDMA_CONFIG_ONE_DMA;
3191 REG_WR(bp, BNX2_TDMA_CONFIG, val);
3194 if (bp->flags & PCIX_FLAG) {
3197 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3199 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3200 val16 & ~PCI_X_CMD_ERO);
3203 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3204 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3205 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3206 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3208 /* Initialize context mapping and zero out the quick contexts. The
3209 * context block must have already been enabled. */
3210 bnx2_init_context(bp);
3213 bnx2_init_nvram(bp);
3215 bnx2_set_mac_addr(bp);
3217 val = REG_RD(bp, BNX2_MQ_CONFIG);
3218 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3219 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3220 REG_WR(bp, BNX2_MQ_CONFIG, val);
3222 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3223 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
3224 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
3226 val = (BCM_PAGE_BITS - 8) << 24;
3227 REG_WR(bp, BNX2_RV2P_CONFIG, val);
3229 /* Configure page size. */
3230 val = REG_RD(bp, BNX2_TBDR_CONFIG);
3231 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
3232 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3233 REG_WR(bp, BNX2_TBDR_CONFIG, val);
3235 val = bp->mac_addr[0] +
3236 (bp->mac_addr[1] << 8) +
3237 (bp->mac_addr[2] << 16) +
3239 (bp->mac_addr[4] << 8) +
3240 (bp->mac_addr[5] << 16);
3241 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
3243 /* Program the MTU. Also include 4 bytes for CRC32. */
3244 val = bp->dev->mtu + ETH_HLEN + 4;
3245 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
3246 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
3247 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
3249 bp->last_status_idx = 0;
3250 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
3252 /* Set up how to generate a link change interrupt. */
3253 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
3255 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
3256 (u64) bp->status_blk_mapping & 0xffffffff);
3257 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
3259 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
3260 (u64) bp->stats_blk_mapping & 0xffffffff);
3261 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
3262 (u64) bp->stats_blk_mapping >> 32);
3264 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
3265 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
3267 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
3268 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
3270 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
3271 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
3273 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
3275 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
3277 REG_WR(bp, BNX2_HC_COM_TICKS,
3278 (bp->com_ticks_int << 16) | bp->com_ticks);
3280 REG_WR(bp, BNX2_HC_CMD_TICKS,
3281 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
3283 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
3284 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
3286 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
3287 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
3289 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
3290 BNX2_HC_CONFIG_TX_TMR_MODE |
3291 BNX2_HC_CONFIG_COLLECT_STATS);
3294 /* Clear internal stats counters. */
3295 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
3297 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3299 if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
3300 BNX2_PORT_FEATURE_ASF_ENABLED)
3301 bp->flags |= ASF_ENABLE_FLAG;
3303 /* Initialize the receive filter. */
3304 bnx2_set_rx_mode(bp->dev);
3306 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
3309 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
3310 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
3314 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
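/* Initialize the TX ring: point the last BD back at the start of the
 * ring to form a chain and program the ring's buffer descriptor
 * address and type into the TX context.
 */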
3321 bnx2_init_tx_ring(struct bnx2 *bp)
3326 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
3328 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
3329 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
3334 bp->tx_prod_bseq = 0;
3336 val = BNX2_L2CTX_TYPE_TYPE_L2;
3337 val |= BNX2_L2CTX_TYPE_SIZE_L2;
3338 CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TYPE, val);
3340 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2;
3342 CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_CMD_TYPE, val);
3344 val = (u64) bp->tx_desc_mapping >> 32;
3345 CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_HI, val);
3347 val = (u64) bp->tx_desc_mapping & 0xffffffff;
3348 CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_LO, val);
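/* Initialize the RX rings: size the receive buffers from the current
 * MTU, chain the BD pages together, program the RX context, and
 * pre-fill the ring with receive SKBs.
 */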
3352 bnx2_init_rx_ring(struct bnx2 *bp)
3356 u16 prod, ring_prod;
3359 /* 8 for CRC and VLAN */
3360 bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
3361 /* 8 for alignment */
3362 bp->rx_buf_size = bp->rx_buf_use_size + 8;
3364 ring_prod = prod = bp->rx_prod = 0;
3367 bp->rx_prod_bseq = 0;
3369 for (i = 0; i < bp->rx_max_ring; i++) {
3372 rxbd = &bp->rx_desc_ring[i][0];
3373 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
3374 rxbd->rx_bd_len = bp->rx_buf_use_size;
3375 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
3377 if (i == (bp->rx_max_ring - 1))
3381 rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
3382 rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
3386 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
3387 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
3389 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);
3391 val = (u64) bp->rx_desc_mapping[0] >> 32;
3392 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);
3394 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
3395 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);
3397 for (i = 0; i < bp->rx_ring_size; i++) {
3398 if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
3401 prod = NEXT_RX_BD(prod);
3402 ring_prod = RX_RING_IDX(prod);
3406 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
3408 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
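/* Record the requested RX ring size and compute how many BD pages
 * (rounded up to a power of two) are needed to hold it.
 */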
3412 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3416 bp->rx_ring_size = size;
3418 while (size > MAX_RX_DESC_CNT) {
3419 size -= MAX_RX_DESC_CNT;
3422 /* round to next power of 2 */
3424 while ((max & num_rings) == 0)
3427 if (num_rings != max)
3430 bp->rx_max_ring = max;
3431 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
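/* Unmap and free any SKBs still attached to the TX ring, including
 * their fragment pages.
 */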
3435 bnx2_free_tx_skbs(struct bnx2 *bp)
3439 if (bp->tx_buf_ring == NULL)
3442 for (i = 0; i < TX_DESC_CNT; ) {
3443 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
3444 struct sk_buff *skb = tx_buf->skb;
3452 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
3453 skb_headlen(skb), PCI_DMA_TODEVICE);
3457 last = skb_shinfo(skb)->nr_frags;
3458 for (j = 0; j < last; j++) {
3459 tx_buf = &bp->tx_buf_ring[i + j + 1];
3460 pci_unmap_page(bp->pdev,
3461 pci_unmap_addr(tx_buf, mapping),
3462 skb_shinfo(skb)->frags[j].size,
3465 dev_kfree_skb_any(skb);
3472 bnx2_free_rx_skbs(struct bnx2 *bp)
3476 if (bp->rx_buf_ring == NULL)
3479 for (i = 0; i < bp->rx_max_ring_idx; i++) {
3480 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
3481 struct sk_buff *skb = rx_buf->skb;
3486 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
3487 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
3491 dev_kfree_skb_any(skb);
3496 bnx2_free_skbs(struct bnx2 *bp)
3498 bnx2_free_tx_skbs(bp);
3499 bnx2_free_rx_skbs(bp);
3503 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
3507 rc = bnx2_reset_chip(bp, reset_code);
3513 bnx2_init_tx_ring(bp);
3514 bnx2_init_rx_ring(bp);
3519 bnx2_init_nic(struct bnx2 *bp)
3523 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
3532 bnx2_test_registers(struct bnx2 *bp)
3536 static const struct {
3542 { 0x006c, 0, 0x00000000, 0x0000003f },
3543 { 0x0090, 0, 0xffffffff, 0x00000000 },
3544 { 0x0094, 0, 0x00000000, 0x00000000 },
3546 { 0x0404, 0, 0x00003f00, 0x00000000 },
3547 { 0x0418, 0, 0x00000000, 0xffffffff },
3548 { 0x041c, 0, 0x00000000, 0xffffffff },
3549 { 0x0420, 0, 0x00000000, 0x80ffffff },
3550 { 0x0424, 0, 0x00000000, 0x00000000 },
3551 { 0x0428, 0, 0x00000000, 0x00000001 },
3552 { 0x0450, 0, 0x00000000, 0x0000ffff },
3553 { 0x0454, 0, 0x00000000, 0xffffffff },
3554 { 0x0458, 0, 0x00000000, 0xffffffff },
3556 { 0x0808, 0, 0x00000000, 0xffffffff },
3557 { 0x0854, 0, 0x00000000, 0xffffffff },
3558 { 0x0868, 0, 0x00000000, 0x77777777 },
3559 { 0x086c, 0, 0x00000000, 0x77777777 },
3560 { 0x0870, 0, 0x00000000, 0x77777777 },
3561 { 0x0874, 0, 0x00000000, 0x77777777 },
3563 { 0x0c00, 0, 0x00000000, 0x00000001 },
3564 { 0x0c04, 0, 0x00000000, 0x03ff0001 },
3565 { 0x0c08, 0, 0x0f0ff073, 0x00000000 },
3567 { 0x1000, 0, 0x00000000, 0x00000001 },
3568 { 0x1004, 0, 0x00000000, 0x000f0001 },
3570 { 0x1408, 0, 0x01c00800, 0x00000000 },
3571 { 0x149c, 0, 0x8000ffff, 0x00000000 },
3572 { 0x14a8, 0, 0x00000000, 0x000001ff },
3573 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
3574 { 0x14b0, 0, 0x00000002, 0x00000001 },
3575 { 0x14b8, 0, 0x00000000, 0x00000000 },
3576 { 0x14c0, 0, 0x00000000, 0x00000009 },
3577 { 0x14c4, 0, 0x00003fff, 0x00000000 },
3578 { 0x14cc, 0, 0x00000000, 0x00000001 },
3579 { 0x14d0, 0, 0xffffffff, 0x00000000 },
3581 { 0x1800, 0, 0x00000000, 0x00000001 },
3582 { 0x1804, 0, 0x00000000, 0x00000003 },
3584 { 0x2800, 0, 0x00000000, 0x00000001 },
3585 { 0x2804, 0, 0x00000000, 0x00003f01 },
3586 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
3587 { 0x2810, 0, 0xffff0000, 0x00000000 },
3588 { 0x2814, 0, 0xffff0000, 0x00000000 },
3589 { 0x2818, 0, 0xffff0000, 0x00000000 },
3590 { 0x281c, 0, 0xffff0000, 0x00000000 },
3591 { 0x2834, 0, 0xffffffff, 0x00000000 },
3592 { 0x2840, 0, 0x00000000, 0xffffffff },
3593 { 0x2844, 0, 0x00000000, 0xffffffff },
3594 { 0x2848, 0, 0xffffffff, 0x00000000 },
3595 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
3597 { 0x2c00, 0, 0x00000000, 0x00000011 },
3598 { 0x2c04, 0, 0x00000000, 0x00030007 },
3600 { 0x3c00, 0, 0x00000000, 0x00000001 },
3601 { 0x3c04, 0, 0x00000000, 0x00070000 },
3602 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
3603 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
3604 { 0x3c10, 0, 0xffffffff, 0x00000000 },
3605 { 0x3c14, 0, 0x00000000, 0xffffffff },
3606 { 0x3c18, 0, 0x00000000, 0xffffffff },
3607 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
3608 { 0x3c20, 0, 0xffffff00, 0x00000000 },
3610 { 0x5004, 0, 0x00000000, 0x0000007f },
3611 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
3612 { 0x500c, 0, 0xf800f800, 0x07ff07ff },
3614 { 0x5c00, 0, 0x00000000, 0x00000001 },
3615 { 0x5c04, 0, 0x00000000, 0x0003000f },
3616 { 0x5c08, 0, 0x00000003, 0x00000000 },
3617 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
3618 { 0x5c10, 0, 0x00000000, 0xffffffff },
3619 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
3620 { 0x5c84, 0, 0x00000000, 0x0000f333 },
3621 { 0x5c88, 0, 0x00000000, 0x00077373 },
3622 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
3624 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
3625 { 0x680c, 0, 0xffffffff, 0x00000000 },
3626 { 0x6810, 0, 0xffffffff, 0x00000000 },
3627 { 0x6814, 0, 0xffffffff, 0x00000000 },
3628 { 0x6818, 0, 0xffffffff, 0x00000000 },
3629 { 0x681c, 0, 0xffffffff, 0x00000000 },
3630 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
3631 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
3632 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
3633 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
3634 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
3635 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
3636 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
3637 { 0x683c, 0, 0x0000ffff, 0x00000000 },
3638 { 0x6840, 0, 0x00000ff0, 0x00000000 },
3639 { 0x6844, 0, 0x00ffff00, 0x00000000 },
3640 { 0x684c, 0, 0xffffffff, 0x00000000 },
3641 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
3642 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
3643 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
3644 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
3645 { 0x6908, 0, 0x00000000, 0x0001ff0f },
3646 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
3648 { 0xffff, 0, 0x00000000, 0x00000000 },
3652 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
3653 u32 offset, rw_mask, ro_mask, save_val, val;
3655 offset = (u32) reg_tbl[i].offset;
3656 rw_mask = reg_tbl[i].rw_mask;
3657 ro_mask = reg_tbl[i].ro_mask;
3659 save_val = readl(bp->regview + offset);
3661 writel(0, bp->regview + offset);
3663 val = readl(bp->regview + offset);
3664 if ((val & rw_mask) != 0) {
3668 if ((val & ro_mask) != (save_val & ro_mask)) {
3672 writel(0xffffffff, bp->regview + offset);
3674 val = readl(bp->regview + offset);
3675 if ((val & rw_mask) != rw_mask) {
3679 if ((val & ro_mask) != (save_val & ro_mask)) {
3683 writel(save_val, bp->regview + offset);
3687 writel(save_val, bp->regview + offset);
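/* Walk an internal memory region through several test patterns using
 * indirect register access and verify that each location reads back
 * correctly.
 */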
3695 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
3697 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
3698 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
3701 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
3704 for (offset = 0; offset < size; offset += 4) {
3706 REG_WR_IND(bp, start + offset, test_pattern[i]);
3708 if (REG_RD_IND(bp, start + offset) !=
3718 bnx2_test_memory(struct bnx2 *bp)
3722 static const struct {
3726 { 0x60000, 0x4000 },
3727 { 0xa0000, 0x3000 },
3728 { 0xe0000, 0x4000 },
3729 { 0x120000, 0x4000 },
3730 { 0x1a0000, 0x4000 },
3731 { 0x160000, 0x4000 },
3735 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
3736 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
3737 mem_tbl[i].len)) != 0) {
3745 #define BNX2_MAC_LOOPBACK 0
3746 #define BNX2_PHY_LOOPBACK 1
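/* Send a single test packet with the MAC or PHY in loopback and check
 * that it comes back on the RX ring intact and error free.
 */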
3749 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
3751 unsigned int pkt_size, num_pkts, i;
3752 struct sk_buff *skb, *rx_skb;
3753 unsigned char *packet;
3754 u16 rx_start_idx, rx_idx;
3757 struct sw_bd *rx_buf;
3758 struct l2_fhdr *rx_hdr;
3761 if (loopback_mode == BNX2_MAC_LOOPBACK) {
3762 bp->loopback = MAC_LOOPBACK;
3763 bnx2_set_mac_loopback(bp);
3765 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
3767 bnx2_set_phy_loopback(bp);
3773 skb = dev_alloc_skb(pkt_size);
3776 packet = skb_put(skb, pkt_size);
3777 memcpy(packet, bp->mac_addr, 6);
3778 memset(packet + 6, 0x0, 8);
3779 for (i = 14; i < pkt_size; i++)
3780 packet[i] = (unsigned char) (i & 0xff);
3782 map = pci_map_single(bp->pdev, skb->data, pkt_size,
3785 REG_WR(bp, BNX2_HC_COMMAND,
3786 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3788 REG_RD(bp, BNX2_HC_COMMAND);
3791 rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
3795 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
3797 txbd->tx_bd_haddr_hi = (u64) map >> 32;
3798 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
3799 txbd->tx_bd_mss_nbytes = pkt_size;
3800 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
3803 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
3804 bp->tx_prod_bseq += pkt_size;
3806 REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, bp->tx_prod);
3807 REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);
3811 REG_WR(bp, BNX2_HC_COMMAND,
3812 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3814 REG_RD(bp, BNX2_HC_COMMAND);
3818 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
3819 dev_kfree_skb_irq(skb);
3821 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
3822 goto loopback_test_done;
3825 rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
3826 if (rx_idx != rx_start_idx + num_pkts) {
3827 goto loopback_test_done;
3830 rx_buf = &bp->rx_buf_ring[rx_start_idx];
3831 rx_skb = rx_buf->skb;
3833 rx_hdr = (struct l2_fhdr *) rx_skb->data;
3834 skb_reserve(rx_skb, bp->rx_offset);
3836 pci_dma_sync_single_for_cpu(bp->pdev,
3837 pci_unmap_addr(rx_buf, mapping),
3838 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
3840 if (rx_hdr->l2_fhdr_status &
3841 (L2_FHDR_ERRORS_BAD_CRC |
3842 L2_FHDR_ERRORS_PHY_DECODE |
3843 L2_FHDR_ERRORS_ALIGNMENT |
3844 L2_FHDR_ERRORS_TOO_SHORT |
3845 L2_FHDR_ERRORS_GIANT_FRAME)) {
3847 goto loopback_test_done;
3850 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
3851 goto loopback_test_done;
3854 for (i = 14; i < pkt_size; i++) {
3855 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
3856 goto loopback_test_done;
3867 #define BNX2_MAC_LOOPBACK_FAILED 1
3868 #define BNX2_PHY_LOOPBACK_FAILED 2
3869 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
3870 BNX2_PHY_LOOPBACK_FAILED)
3873 bnx2_test_loopback(struct bnx2 *bp)
3877 if (!netif_running(bp->dev))
3878 return BNX2_LOOPBACK_FAILED;
3880 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
3881 spin_lock_bh(&bp->phy_lock);
3883 spin_unlock_bh(&bp->phy_lock);
3884 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
3885 rc |= BNX2_MAC_LOOPBACK_FAILED;
3886 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
3887 rc |= BNX2_PHY_LOOPBACK_FAILED;
3891 #define NVRAM_SIZE 0x200
3892 #define CRC32_RESIDUAL 0xdebb20e3
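/* NVRAM self-test: verify the magic value in the first dword and check
 * the CRC32 residual of two 256-byte blocks read from offset 0x100.
 */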
3895 bnx2_test_nvram(struct bnx2 *bp)
3897 u32 buf[NVRAM_SIZE / 4];
3898 u8 *data = (u8 *) buf;
3902 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
3903 goto test_nvram_done;
3905 magic = be32_to_cpu(buf[0]);
3906 if (magic != 0x669955aa) {
3908 goto test_nvram_done;
3911 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
3912 goto test_nvram_done;
3914 csum = ether_crc_le(0x100, data);
3915 if (csum != CRC32_RESIDUAL) {
3917 goto test_nvram_done;
3920 csum = ether_crc_le(0x100, data + 0x100);
3921 if (csum != CRC32_RESIDUAL) {
3930 bnx2_test_link(struct bnx2 *bp)
3934 spin_lock_bh(&bp->phy_lock);
3935 bnx2_read_phy(bp, MII_BMSR, &bmsr);
3936 bnx2_read_phy(bp, MII_BMSR, &bmsr);
3937 spin_unlock_bh(&bp->phy_lock);
3939 if (bmsr & BMSR_LSTATUS) {
3946 bnx2_test_intr(struct bnx2 *bp)
3951 if (!netif_running(bp->dev))
3954 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
3956 /* This register is not touched during run-time. */
3957 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
3958 REG_RD(bp, BNX2_HC_COMMAND);
3960 for (i = 0; i < 10; i++) {
3961 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
3967 msleep_interruptible(10);
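/* Periodic driver timer: send the heartbeat pulse to the firmware and,
 * on 5706 SerDes parts, handle the parallel-detect workaround when
 * autonegotiation has not completed.
 */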
3976 bnx2_timer(unsigned long data)
3978 struct bnx2 *bp = (struct bnx2 *) data;
3981 if (!netif_running(bp->dev))
3984 if (atomic_read(&bp->intr_sem) != 0)
3985 goto bnx2_restart_timer;
3987 msg = (u32) ++bp->fw_drv_pulse_wr_seq;
3988 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);
3990 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
3991 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
3993 spin_lock(&bp->phy_lock);
3994 if (bp->serdes_an_pending) {
3995 bp->serdes_an_pending--;
3997 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4000 bp->current_interval = bp->timer_interval;
4002 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4004 if (bmcr & BMCR_ANENABLE) {
4007 bnx2_write_phy(bp, 0x1c, 0x7c00);
4008 bnx2_read_phy(bp, 0x1c, &phy1);
4010 bnx2_write_phy(bp, 0x17, 0x0f01);
4011 bnx2_read_phy(bp, 0x15, &phy2);
4012 bnx2_write_phy(bp, 0x17, 0x0f01);
4013 bnx2_read_phy(bp, 0x15, &phy2);
4015 if ((phy1 & 0x10) && /* SIGNAL DETECT */
4016 !(phy2 & 0x20)) { /* no CONFIG */
4018 bmcr &= ~BMCR_ANENABLE;
4019 bmcr |= BMCR_SPEED1000 |
4021 bnx2_write_phy(bp, MII_BMCR, bmcr);
4023 PHY_PARALLEL_DETECT_FLAG;
4027 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
4028 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
4031 bnx2_write_phy(bp, 0x17, 0x0f01);
4032 bnx2_read_phy(bp, 0x15, &phy2);
4036 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4037 bmcr |= BMCR_ANENABLE;
4038 bnx2_write_phy(bp, MII_BMCR, bmcr);
4040 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
4045 bp->current_interval = bp->timer_interval;
4047 spin_unlock(&bp->phy_lock);
4051 mod_timer(&bp->timer, jiffies + bp->current_interval);
4054 /* Called with rtnl_lock */
4056 bnx2_open(struct net_device *dev)
4058 struct bnx2 *bp = netdev_priv(dev);
4061 bnx2_set_power_state(bp, PCI_D0);
4062 bnx2_disable_int(bp);
4064 rc = bnx2_alloc_mem(bp);
4068 if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
4069 (CHIP_ID(bp) != CHIP_ID_5706_A1) &&
4072 if (pci_enable_msi(bp->pdev) == 0) {
4073 bp->flags |= USING_MSI_FLAG;
4074 rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
4078 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4079 SA_SHIRQ, dev->name, dev);
4083 rc = request_irq(bp->pdev->irq, bnx2_interrupt, SA_SHIRQ,
4091 rc = bnx2_init_nic(bp);
4094 free_irq(bp->pdev->irq, dev);
4095 if (bp->flags & USING_MSI_FLAG) {
4096 pci_disable_msi(bp->pdev);
4097 bp->flags &= ~USING_MSI_FLAG;
4104 mod_timer(&bp->timer, jiffies + bp->current_interval);
4106 atomic_set(&bp->intr_sem, 0);
4108 bnx2_enable_int(bp);
4110 if (bp->flags & USING_MSI_FLAG) {
4111 /* Test MSI to make sure it is working.
4112 * If the MSI test fails, go back to INTx mode.
4114 if (bnx2_test_intr(bp) != 0) {
4115 printk(KERN_WARNING PFX "%s: No interrupt was generated"
4116 " using MSI, switching to INTx mode. Please"
4117 " report this failure to the PCI maintainer"
4118 " and include system chipset information.\n",
4121 bnx2_disable_int(bp);
4122 free_irq(bp->pdev->irq, dev);
4123 pci_disable_msi(bp->pdev);
4124 bp->flags &= ~USING_MSI_FLAG;
4126 rc = bnx2_init_nic(bp);
4129 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4130 SA_SHIRQ, dev->name, dev);
4135 del_timer_sync(&bp->timer);
4138 bnx2_enable_int(bp);
4141 if (bp->flags & USING_MSI_FLAG) {
4142 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
4145 netif_start_queue(dev);
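/* Work-queue handler that resets the NIC (scheduled from the TX
 * timeout handler): stop the interface, reinitialize the hardware,
 * and restart it.
 */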
4151 bnx2_reset_task(void *data)
4153 struct bnx2 *bp = data;
4155 if (!netif_running(bp->dev))
4158 bp->in_reset_task = 1;
4159 bnx2_netif_stop(bp);
4163 atomic_set(&bp->intr_sem, 1);
4164 bnx2_netif_start(bp);
4165 bp->in_reset_task = 0;
4169 bnx2_tx_timeout(struct net_device *dev)
4171 struct bnx2 *bp = netdev_priv(dev);
4173 /* This allows the netif to be shut down gracefully before resetting */
4174 schedule_work(&bp->reset_task);
4178 /* Called with rtnl_lock */
4180 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
4182 struct bnx2 *bp = netdev_priv(dev);
4184 bnx2_netif_stop(bp);
4187 bnx2_set_rx_mode(dev);
4189 bnx2_netif_start(bp);
4192 /* Called with rtnl_lock */
4194 bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
4196 struct bnx2 *bp = netdev_priv(dev);
4198 bnx2_netif_stop(bp);
4201 bp->vlgrp->vlan_devices[vid] = NULL;
4202 bnx2_set_rx_mode(dev);
4204 bnx2_netif_start(bp);
4208 /* Called with dev->xmit_lock.
4209 * hard_start_xmit is pseudo-lockless - a lock is only required when
4210 * the tx queue is full. This way, we get the benefit of lockless
4211 * operations most of the time without the complexity of handling
4212 * netif_stop_queue/wake_queue race conditions.
4215 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4217 struct bnx2 *bp = netdev_priv(dev);
4220 struct sw_bd *tx_buf;
4221 u32 len, vlan_tag_flags, last_frag, mss;
4222 u16 prod, ring_prod;
4225 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
4226 netif_stop_queue(dev);
4227 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
4230 return NETDEV_TX_BUSY;
4232 len = skb_headlen(skb);
4234 ring_prod = TX_RING_IDX(prod);
4237 if (skb->ip_summed == CHECKSUM_HW) {
4238 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4241 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
4243 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
4246 if ((mss = skb_shinfo(skb)->tso_size) &&
4247 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
4248 u32 tcp_opt_len, ip_tcp_len;
4250 if (skb_header_cloned(skb) &&
4251 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4253 return NETDEV_TX_OK;
4256 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
4257 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
4260 if (skb->h.th->doff > 5) {
4261 tcp_opt_len = (skb->h.th->doff - 5) << 2;
4263 ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr);
4265 skb->nh.iph->check = 0;
4266 skb->nh.iph->tot_len = ntohs(mss + ip_tcp_len + tcp_opt_len);
4268 ~csum_tcpudp_magic(skb->nh.iph->saddr,
4272 if (tcp_opt_len || (skb->nh.iph->ihl > 5)) {
4273 vlan_tag_flags |= ((skb->nh.iph->ihl - 5) +
4274 (tcp_opt_len >> 2)) << 8;
4283 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4285 tx_buf = &bp->tx_buf_ring[ring_prod];
4287 pci_unmap_addr_set(tx_buf, mapping, mapping);
4289 txbd = &bp->tx_desc_ring[ring_prod];
4291 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4292 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4293 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4294 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
4296 last_frag = skb_shinfo(skb)->nr_frags;
4298 for (i = 0; i < last_frag; i++) {
4299 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4301 prod = NEXT_TX_BD(prod);
4302 ring_prod = TX_RING_IDX(prod);
4303 txbd = &bp->tx_desc_ring[ring_prod];
4306 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
4307 len, PCI_DMA_TODEVICE);
4308 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
4311 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4312 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4313 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4314 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
4317 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
4319 prod = NEXT_TX_BD(prod);
4320 bp->tx_prod_bseq += skb->len;
4322 REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, prod);
4323 REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);
4328 dev->trans_start = jiffies;
4330 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
4331 spin_lock(&bp->tx_lock);
4332 netif_stop_queue(dev);
4334 if (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)
4335 netif_wake_queue(dev);
4336 spin_unlock(&bp->tx_lock);
4339 return NETDEV_TX_OK;
4342 /* Called with rtnl_lock */
4344 bnx2_close(struct net_device *dev)
4346 struct bnx2 *bp = netdev_priv(dev);
4349 /* Calling flush_scheduled_work() may deadlock because
4350 * linkwatch_event() may be on the workqueue and it will try to get
4351 * the rtnl_lock, which we are holding.
4353 while (bp->in_reset_task)
4356 bnx2_netif_stop(bp);
4357 del_timer_sync(&bp->timer);
4358 if (bp->flags & NO_WOL_FLAG)
4359 reset_code = BNX2_DRV_MSG_CODE_UNLOAD;
4361 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
4363 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
4364 bnx2_reset_chip(bp, reset_code);
4365 free_irq(bp->pdev->irq, dev);
4366 if (bp->flags & USING_MSI_FLAG) {
4367 pci_disable_msi(bp->pdev);
4368 bp->flags &= ~USING_MSI_FLAG;
4373 netif_carrier_off(bp->dev);
4374 bnx2_set_power_state(bp, PCI_D3hot);
4378 #define GET_NET_STATS64(ctr) \
4379 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
4380 (unsigned long) (ctr##_lo)
4382 #define GET_NET_STATS32(ctr) \
4385 #if (BITS_PER_LONG == 64)
4386 #define GET_NET_STATS GET_NET_STATS64
4388 #define GET_NET_STATS GET_NET_STATS32
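/* Collate the hardware statistics block into the standard
 * net_device_stats counters, combining the 64-bit hardware counters
 * with GET_NET_STATS.
 */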
4391 static struct net_device_stats *
4392 bnx2_get_stats(struct net_device *dev)
4394 struct bnx2 *bp = netdev_priv(dev);
4395 struct statistics_block *stats_blk = bp->stats_blk;
4396 struct net_device_stats *net_stats = &bp->net_stats;
4398 if (bp->stats_blk == NULL) {
4401 net_stats->rx_packets =
4402 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
4403 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
4404 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
4406 net_stats->tx_packets =
4407 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
4408 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
4409 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
4411 net_stats->rx_bytes =
4412 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
4414 net_stats->tx_bytes =
4415 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
4417 net_stats->multicast =
4418 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
4420 net_stats->collisions =
4421 (unsigned long) stats_blk->stat_EtherStatsCollisions;
4423 net_stats->rx_length_errors =
4424 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
4425 stats_blk->stat_EtherStatsOverrsizePkts);
4427 net_stats->rx_over_errors =
4428 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
4430 net_stats->rx_frame_errors =
4431 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
4433 net_stats->rx_crc_errors =
4434 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
4436 net_stats->rx_errors = net_stats->rx_length_errors +
4437 net_stats->rx_over_errors + net_stats->rx_frame_errors +
4438 net_stats->rx_crc_errors;
4440 net_stats->tx_aborted_errors =
4441 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
4442 stats_blk->stat_Dot3StatsLateCollisions);
4444 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
4445 (CHIP_ID(bp) == CHIP_ID_5708_A0))
4446 net_stats->tx_carrier_errors = 0;
4448 net_stats->tx_carrier_errors =
4450 stats_blk->stat_Dot3StatsCarrierSenseErrors;
4453 net_stats->tx_errors =
4455 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
4457 net_stats->tx_aborted_errors +
4458 net_stats->tx_carrier_errors;
4463 /* All ethtool functions called with rtnl_lock */
4466 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4468 struct bnx2 *bp = netdev_priv(dev);
4470 cmd->supported = SUPPORTED_Autoneg;
4471 if (bp->phy_flags & PHY_SERDES_FLAG) {
4472 cmd->supported |= SUPPORTED_1000baseT_Full |
4475 cmd->port = PORT_FIBRE;
4478 cmd->supported |= SUPPORTED_10baseT_Half |
4479 SUPPORTED_10baseT_Full |
4480 SUPPORTED_100baseT_Half |
4481 SUPPORTED_100baseT_Full |
4482 SUPPORTED_1000baseT_Full |
4485 cmd->port = PORT_TP;
4488 cmd->advertising = bp->advertising;
4490 if (bp->autoneg & AUTONEG_SPEED) {
4491 cmd->autoneg = AUTONEG_ENABLE;
4494 cmd->autoneg = AUTONEG_DISABLE;
4497 if (netif_carrier_ok(dev)) {
4498 cmd->speed = bp->line_speed;
4499 cmd->duplex = bp->duplex;
4506 cmd->transceiver = XCVR_INTERNAL;
4507 cmd->phy_address = bp->phy_addr;
4513 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4515 struct bnx2 *bp = netdev_priv(dev);
4516 u8 autoneg = bp->autoneg;
4517 u8 req_duplex = bp->req_duplex;
4518 u16 req_line_speed = bp->req_line_speed;
4519 u32 advertising = bp->advertising;
4521 if (cmd->autoneg == AUTONEG_ENABLE) {
4522 autoneg |= AUTONEG_SPEED;
4524 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
4526 /* allow advertising only one speed */
4527 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
4528 (cmd->advertising == ADVERTISED_10baseT_Full) ||
4529 (cmd->advertising == ADVERTISED_100baseT_Half) ||
4530 (cmd->advertising == ADVERTISED_100baseT_Full)) {
4532 if (bp->phy_flags & PHY_SERDES_FLAG)
4535 advertising = cmd->advertising;
4538 else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
4539 advertising = cmd->advertising;
4541 else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
4545 if (bp->phy_flags & PHY_SERDES_FLAG) {
4546 advertising = ETHTOOL_ALL_FIBRE_SPEED;
4549 advertising = ETHTOOL_ALL_COPPER_SPEED;
4552 advertising |= ADVERTISED_Autoneg;
4555 if (bp->phy_flags & PHY_SERDES_FLAG) {
4556 if ((cmd->speed != SPEED_1000) ||
4557 (cmd->duplex != DUPLEX_FULL)) {
4561 else if (cmd->speed == SPEED_1000) {
4564 autoneg &= ~AUTONEG_SPEED;
4565 req_line_speed = cmd->speed;
4566 req_duplex = cmd->duplex;
4570 bp->autoneg = autoneg;
4571 bp->advertising = advertising;
4572 bp->req_line_speed = req_line_speed;
4573 bp->req_duplex = req_duplex;
4575 spin_lock_bh(&bp->phy_lock);
4579 spin_unlock_bh(&bp->phy_lock);
4585 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4587 struct bnx2 *bp = netdev_priv(dev);
4589 strcpy(info->driver, DRV_MODULE_NAME);
4590 strcpy(info->version, DRV_MODULE_VERSION);
4591 strcpy(info->bus_info, pci_name(bp->pdev));
4592 info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
4593 info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
4594 info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
4595 info->fw_version[1] = info->fw_version[3] = '.';
4596 info->fw_version[5] = 0;
4599 #define BNX2_REGDUMP_LEN (32 * 1024)
4602 bnx2_get_regs_len(struct net_device *dev)
4604 return BNX2_REGDUMP_LEN;
4608 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
4610 u32 *p = _p, i, offset;
4612 struct bnx2 *bp = netdev_priv(dev);
4613 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
4614 0x0800, 0x0880, 0x0c00, 0x0c10,
4615 0x0c30, 0x0d08, 0x1000, 0x101c,
4616 0x1040, 0x1048, 0x1080, 0x10a4,
4617 0x1400, 0x1490, 0x1498, 0x14f0,
4618 0x1500, 0x155c, 0x1580, 0x15dc,
4619 0x1600, 0x1658, 0x1680, 0x16d8,
4620 0x1800, 0x1820, 0x1840, 0x1854,
4621 0x1880, 0x1894, 0x1900, 0x1984,
4622 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
4623 0x1c80, 0x1c94, 0x1d00, 0x1d84,
4624 0x2000, 0x2030, 0x23c0, 0x2400,
4625 0x2800, 0x2820, 0x2830, 0x2850,
4626 0x2b40, 0x2c10, 0x2fc0, 0x3058,
4627 0x3c00, 0x3c94, 0x4000, 0x4010,
4628 0x4080, 0x4090, 0x43c0, 0x4458,
4629 0x4c00, 0x4c18, 0x4c40, 0x4c54,
4630 0x4fc0, 0x5010, 0x53c0, 0x5444,
4631 0x5c00, 0x5c18, 0x5c80, 0x5c90,
4632 0x5fc0, 0x6000, 0x6400, 0x6428,
4633 0x6800, 0x6848, 0x684c, 0x6860,
4634 0x6888, 0x6910, 0x8000 };
4638 memset(p, 0, BNX2_REGDUMP_LEN);
4640 if (!netif_running(bp->dev))
4644 offset = reg_boundaries[0];
4646 while (offset < BNX2_REGDUMP_LEN) {
4647 *p++ = REG_RD(bp, offset);
4649 if (offset == reg_boundaries[i + 1]) {
4650 offset = reg_boundaries[i + 2];
4651 p = (u32 *) (orig_p + offset);
4658 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4660 struct bnx2 *bp = netdev_priv(dev);
4662 if (bp->flags & NO_WOL_FLAG) {
4667 wol->supported = WAKE_MAGIC;
4669 wol->wolopts = WAKE_MAGIC;
4673 memset(&wol->sopass, 0, sizeof(wol->sopass));
4677 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4679 struct bnx2 *bp = netdev_priv(dev);
4681 if (wol->wolopts & ~WAKE_MAGIC)
4684 if (wol->wolopts & WAKE_MAGIC) {
4685 if (bp->flags & NO_WOL_FLAG)
4697 bnx2_nway_reset(struct net_device *dev)
4699 struct bnx2 *bp = netdev_priv(dev);
4702 if (!(bp->autoneg & AUTONEG_SPEED)) {
4706 spin_lock_bh(&bp->phy_lock);
4708 /* Force a link down that is visible to the other side */
4709 if (bp->phy_flags & PHY_SERDES_FLAG) {
4710 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
4711 spin_unlock_bh(&bp->phy_lock);
4715 spin_lock_bh(&bp->phy_lock);
4716 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
4717 bp->current_interval = SERDES_AN_TIMEOUT;
4718 bp->serdes_an_pending = 1;
4719 mod_timer(&bp->timer, jiffies + bp->current_interval);
4723 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4724 bmcr &= ~BMCR_LOOPBACK;
4725 bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
4727 spin_unlock_bh(&bp->phy_lock);
4733 bnx2_get_eeprom_len(struct net_device *dev)
4735 struct bnx2 *bp = netdev_priv(dev);
4737 if (bp->flash_info == NULL)
4740 return (int) bp->flash_size;
4744 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4747 struct bnx2 *bp = netdev_priv(dev);
4750 /* parameters already validated in ethtool_get_eeprom */
4752 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
4758 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4761 struct bnx2 *bp = netdev_priv(dev);
4764 /* parameters already validated in ethtool_set_eeprom */
4766 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
4772 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
4774 struct bnx2 *bp = netdev_priv(dev);
4776 memset(coal, 0, sizeof(struct ethtool_coalesce));
4778 coal->rx_coalesce_usecs = bp->rx_ticks;
4779 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
4780 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
4781 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
4783 coal->tx_coalesce_usecs = bp->tx_ticks;
4784 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
4785 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
4786 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
4788 coal->stats_block_coalesce_usecs = bp->stats_ticks;
4794 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
4796 struct bnx2 *bp = netdev_priv(dev);
4798 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
4799 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
4801 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
4802 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
4804 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
4805 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
4807 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
4808 if (bp->rx_quick_cons_trip_int > 0xff)
4809 bp->rx_quick_cons_trip_int = 0xff;
4811 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
4812 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
4814 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
4815 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
4817 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
4818 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
4820 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
4821 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
4824 bp->stats_ticks = coal->stats_block_coalesce_usecs;
4825 if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
4826 bp->stats_ticks &= 0xffff00;
4828 if (netif_running(bp->dev)) {
4829 bnx2_netif_stop(bp);
4831 bnx2_netif_start(bp);
4838 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
4840 struct bnx2 *bp = netdev_priv(dev);
4842 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
4843 ering->rx_mini_max_pending = 0;
4844 ering->rx_jumbo_max_pending = 0;
4846 ering->rx_pending = bp->rx_ring_size;
4847 ering->rx_mini_pending = 0;
4848 ering->rx_jumbo_pending = 0;
4850 ering->tx_max_pending = MAX_TX_DESC_CNT;
4851 ering->tx_pending = bp->tx_ring_size;
4855 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
4857 struct bnx2 *bp = netdev_priv(dev);
4859 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
4860 (ering->tx_pending > MAX_TX_DESC_CNT) ||
4861 (ering->tx_pending <= MAX_SKB_FRAGS)) {
4865 if (netif_running(bp->dev)) {
4866 bnx2_netif_stop(bp);
4867 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
4872 bnx2_set_rx_ring_size(bp, ering->rx_pending);
4873 bp->tx_ring_size = ering->tx_pending;
4875 if (netif_running(bp->dev)) {
4878 rc = bnx2_alloc_mem(bp);
4882 bnx2_netif_start(bp);
4889 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
4891 struct bnx2 *bp = netdev_priv(dev);
4893 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
4894 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
4895 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
4899 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
4901 struct bnx2 *bp = netdev_priv(dev);
4903 bp->req_flow_ctrl = 0;
4904 if (epause->rx_pause)
4905 bp->req_flow_ctrl |= FLOW_CTRL_RX;
4906 if (epause->tx_pause)
4907 bp->req_flow_ctrl |= FLOW_CTRL_TX;
4909 if (epause->autoneg) {
4910 bp->autoneg |= AUTONEG_FLOW_CTRL;
4913 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
4916 spin_lock_bh(&bp->phy_lock);
4920 spin_unlock_bh(&bp->phy_lock);
4926 bnx2_get_rx_csum(struct net_device *dev)
4928 struct bnx2 *bp = netdev_priv(dev);
4934 bnx2_set_rx_csum(struct net_device *dev, u32 data)
4936 struct bnx2 *bp = netdev_priv(dev);
4942 #define BNX2_NUM_STATS 45
4945 char string[ETH_GSTRING_LEN];
4946 } bnx2_stats_str_arr[BNX2_NUM_STATS] = {
4948 { "rx_error_bytes" },
4950 { "tx_error_bytes" },
4951 { "rx_ucast_packets" },
4952 { "rx_mcast_packets" },
4953 { "rx_bcast_packets" },
4954 { "tx_ucast_packets" },
4955 { "tx_mcast_packets" },
4956 { "tx_bcast_packets" },
4957 { "tx_mac_errors" },
4958 { "tx_carrier_errors" },
4959 { "rx_crc_errors" },
4960 { "rx_align_errors" },
4961 { "tx_single_collisions" },
4962 { "tx_multi_collisions" },
4964 { "tx_excess_collisions" },
4965 { "tx_late_collisions" },
4966 { "tx_total_collisions" },
4969 { "rx_undersize_packets" },
4970 { "rx_oversize_packets" },
4971 { "rx_64_byte_packets" },
4972 { "rx_65_to_127_byte_packets" },
4973 { "rx_128_to_255_byte_packets" },
4974 { "rx_256_to_511_byte_packets" },
4975 { "rx_512_to_1023_byte_packets" },
4976 { "rx_1024_to_1522_byte_packets" },
4977 { "rx_1523_to_9022_byte_packets" },
4978 { "tx_64_byte_packets" },
4979 { "tx_65_to_127_byte_packets" },
4980 { "tx_128_to_255_byte_packets" },
4981 { "tx_256_to_511_byte_packets" },
4982 { "tx_512_to_1023_byte_packets" },
4983 { "tx_1024_to_1522_byte_packets" },
4984 { "tx_1523_to_9022_byte_packets" },
4985 { "rx_xon_frames" },
4986 { "rx_xoff_frames" },
4987 { "tx_xon_frames" },
4988 { "tx_xoff_frames" },
4989 { "rx_mac_ctrl_frames" },
4990 { "rx_filtered_packets" },
4994 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
4996 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
4997 STATS_OFFSET32(stat_IfHCInOctets_hi),
4998 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
4999 STATS_OFFSET32(stat_IfHCOutOctets_hi),
5000 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
5001 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
5002 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
5003 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
5004 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
5005 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
5006 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
5007 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
5008 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
5009 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
5010 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
5011 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
5012 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
5013 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
5014 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
5015 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
5016 STATS_OFFSET32(stat_EtherStatsCollisions),
5017 STATS_OFFSET32(stat_EtherStatsFragments),
5018 STATS_OFFSET32(stat_EtherStatsJabbers),
5019 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
5020 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
5021 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
5022 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
5023 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
5024 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
5025 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
5026 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
5027 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
5028 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
5029 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
5030 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
5031 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
5032 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
5033 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
5034 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
5035 STATS_OFFSET32(stat_XonPauseFramesReceived),
5036 STATS_OFFSET32(stat_XoffPauseFramesReceived),
5037 STATS_OFFSET32(stat_OutXonSent),
5038 STATS_OFFSET32(stat_OutXoffSent),
5039 STATS_OFFSET32(stat_MacControlFramesReceived),
5040 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
5041 STATS_OFFSET32(stat_IfInMBUFDiscards),
5044 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
5045 * skipped because of errata.
5047 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
5048 8,0,8,8,8,8,8,8,8,8,
5049 4,0,4,4,4,4,4,4,4,4,
5050 4,4,4,4,4,4,4,4,4,4,
5051 4,4,4,4,4,4,4,4,4,4,
5055 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
5056 8,0,8,8,8,8,8,8,8,8,
5057 4,4,4,4,4,4,4,4,4,4,
5058 4,4,4,4,4,4,4,4,4,4,
5059 4,4,4,4,4,4,4,4,4,4,
5063 #define BNX2_NUM_TESTS 6
5066 char string[ETH_GSTRING_LEN];
5067 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
5068 { "register_test (offline)" },
5069 { "memory_test (offline)" },
5070 { "loopback_test (offline)" },
5071 { "nvram_test (online)" },
5072 { "interrupt_test (online)" },
5073 { "link_test (online)" },
5077 bnx2_self_test_count(struct net_device *dev)
5079 return BNX2_NUM_TESTS;
5083 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
5085 struct bnx2 *bp = netdev_priv(dev);
5087 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
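/* Offline tests need exclusive use of the hardware: stop the interface,
 * reset the chip into diagnostic mode, and run the register, memory and
 * loopback tests before bringing the interface back up.
 */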
5088 if (etest->flags & ETH_TEST_FL_OFFLINE) {
5089 bnx2_netif_stop(bp);
5090 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
5093 if (bnx2_test_registers(bp) != 0) {
5095 etest->flags |= ETH_TEST_FL_FAILED;
5097 if (bnx2_test_memory(bp) != 0) {
5099 etest->flags |= ETH_TEST_FL_FAILED;
5101 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
5102 etest->flags |= ETH_TEST_FL_FAILED;
5104 if (!netif_running(bp->dev)) {
5105 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5109 bnx2_netif_start(bp);
5112 /* wait for link up */
5113 msleep_interruptible(3000);
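/* Copper links may still be autonegotiating; wait a bit longer if the
 * link has not come up yet.
 */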
5114 if ((!bp->link_up) && !(bp->phy_flags & PHY_SERDES_FLAG))
5115 msleep_interruptible(4000);
5118 if (bnx2_test_nvram(bp) != 0) {
5120 etest->flags |= ETH_TEST_FL_FAILED;
5122 if (bnx2_test_intr(bp) != 0) {
5124 etest->flags |= ETH_TEST_FL_FAILED;
5127 if (bnx2_test_link(bp) != 0) {
5129 etest->flags |= ETH_TEST_FL_FAILED;
5135 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
5137 switch (stringset) {
5139 memcpy(buf, bnx2_stats_str_arr,
5140 sizeof(bnx2_stats_str_arr));
5143 memcpy(buf, bnx2_tests_str_arr,
5144 sizeof(bnx2_tests_str_arr));
5150 bnx2_get_stats_count(struct net_device *dev)
5152 return BNX2_NUM_STATS;
5156 bnx2_get_ethtool_stats(struct net_device *dev,
5157 struct ethtool_stats *stats, u64 *buf)
5159 struct bnx2 *bp = netdev_priv(dev);
5161 u32 *hw_stats = (u32 *) bp->stats_blk;
5162 u8 *stats_len_arr = NULL;
5164 if (hw_stats == NULL) {
5165 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
5169 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
5170 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
5171 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
5172 (CHIP_ID(bp) == CHIP_ID_5708_A0))
5173 stats_len_arr = bnx2_5706_stats_len_arr;
5175 stats_len_arr = bnx2_5708_stats_len_arr;
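/* Assemble each exported counter from the 32-bit words in the hardware
 * statistics block; 64-bit counters are stored high word first.
 */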
5177 for (i = 0; i < BNX2_NUM_STATS; i++) {
5178 if (stats_len_arr[i] == 0) {
5179 /* skip this counter */
5183 if (stats_len_arr[i] == 4) {
5184 /* 4-byte counter */
5186 *(hw_stats + bnx2_stats_offset_arr[i]);
5189 /* 8-byte counter */
5190 buf[i] = (((u64) *(hw_stats +
5191 bnx2_stats_offset_arr[i])) << 32) +
5192 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
5197 bnx2_phys_id(struct net_device *dev, u32 data)
5199 struct bnx2 *bp = netdev_priv(dev);
5206 save = REG_RD(bp, BNX2_MISC_CFG);
5207 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
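/* Drive the LED directly, toggling it every 500 ms for roughly the
 * requested number of seconds, then restore the saved LED configuration.
 */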
5209 for (i = 0; i < (data * 2); i++) {
5211 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
5214 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
5215 BNX2_EMAC_LED_1000MB_OVERRIDE |
5216 BNX2_EMAC_LED_100MB_OVERRIDE |
5217 BNX2_EMAC_LED_10MB_OVERRIDE |
5218 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
5219 BNX2_EMAC_LED_TRAFFIC);
5221 msleep_interruptible(500);
5222 if (signal_pending(current))
5225 REG_WR(bp, BNX2_EMAC_LED, 0);
5226 REG_WR(bp, BNX2_MISC_CFG, save);
5230 static struct ethtool_ops bnx2_ethtool_ops = {
5231 .get_settings = bnx2_get_settings,
5232 .set_settings = bnx2_set_settings,
5233 .get_drvinfo = bnx2_get_drvinfo,
5234 .get_regs_len = bnx2_get_regs_len,
5235 .get_regs = bnx2_get_regs,
5236 .get_wol = bnx2_get_wol,
5237 .set_wol = bnx2_set_wol,
5238 .nway_reset = bnx2_nway_reset,
5239 .get_link = ethtool_op_get_link,
5240 .get_eeprom_len = bnx2_get_eeprom_len,
5241 .get_eeprom = bnx2_get_eeprom,
5242 .set_eeprom = bnx2_set_eeprom,
5243 .get_coalesce = bnx2_get_coalesce,
5244 .set_coalesce = bnx2_set_coalesce,
5245 .get_ringparam = bnx2_get_ringparam,
5246 .set_ringparam = bnx2_set_ringparam,
5247 .get_pauseparam = bnx2_get_pauseparam,
5248 .set_pauseparam = bnx2_set_pauseparam,
5249 .get_rx_csum = bnx2_get_rx_csum,
5250 .set_rx_csum = bnx2_set_rx_csum,
5251 .get_tx_csum = ethtool_op_get_tx_csum,
5252 .set_tx_csum = ethtool_op_set_tx_csum,
5253 .get_sg = ethtool_op_get_sg,
5254 .set_sg = ethtool_op_set_sg,
5256 .get_tso = ethtool_op_get_tso,
5257 .set_tso = ethtool_op_set_tso,
5259 .self_test_count = bnx2_self_test_count,
5260 .self_test = bnx2_self_test,
5261 .get_strings = bnx2_get_strings,
5262 .phys_id = bnx2_phys_id,
5263 .get_stats_count = bnx2_get_stats_count,
5264 .get_ethtool_stats = bnx2_get_ethtool_stats,
5265 .get_perm_addr = ethtool_op_get_perm_addr,
5268 /* Called with rtnl_lock */
5270 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5272 struct mii_ioctl_data *data = if_mii(ifr);
5273 struct bnx2 *bp = netdev_priv(dev);
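/* Standard MII ioctls: report the PHY address, and read or write PHY
 * registers under phy_lock; writes require CAP_NET_ADMIN.
 */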
5278 data->phy_id = bp->phy_addr;
5284 spin_lock_bh(&bp->phy_lock);
5285 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
5286 spin_unlock_bh(&bp->phy_lock);
5288 data->val_out = mii_regval;
5294 if (!capable(CAP_NET_ADMIN))
5297 spin_lock_bh(&bp->phy_lock);
5298 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
5299 spin_unlock_bh(&bp->phy_lock);
5310 /* Called with rtnl_lock */
5312 bnx2_change_mac_addr(struct net_device *dev, void *p)
5314 struct sockaddr *addr = p;
5315 struct bnx2 *bp = netdev_priv(dev);
5317 if (!is_valid_ether_addr(addr->sa_data))
5320 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5321 if (netif_running(dev))
5322 bnx2_set_mac_addr(bp);
5327 /* Called with rtnl_lock */
5329 bnx2_change_mtu(struct net_device *dev, int new_mtu)
5331 struct bnx2 *bp = netdev_priv(dev);
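/* Check the new MTU against the chip's supported frame sizes; if the
 * interface is running, stop and restart it to apply the change.
 */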
5333 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
5334 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
5338 if (netif_running(dev)) {
5339 bnx2_netif_stop(bp);
5343 bnx2_netif_start(bp);
5348 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
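/* Netpoll hook: temporarily mask the device IRQ and invoke the interrupt
 * handler directly so packets can be processed without relying on
 * hardware interrupts.
 */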
5350 poll_bnx2(struct net_device *dev)
5352 struct bnx2 *bp = netdev_priv(dev);
5354 disable_irq(bp->pdev->irq);
5355 bnx2_interrupt(bp->pdev->irq, dev, NULL);
5356 enable_irq(bp->pdev->irq);
5360 static int __devinit
5361 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5364 unsigned long mem_len;
5368 SET_MODULE_OWNER(dev);
5369 SET_NETDEV_DEV(dev, &pdev->dev);
5370 bp = netdev_priv(dev);
5375 /* enable device (incl. PCI PM wakeup), and bus-mastering */
5376 rc = pci_enable_device(pdev);
5378 printk(KERN_ERR PFX "Cannot enable PCI device, aborting.");
5382 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
5383 printk(KERN_ERR PFX "Cannot find PCI device base address, "
5386 goto err_out_disable;
5389 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
5391 printk(KERN_ERR PFX "Cannot obtain PCI resources, aborting.\n");
5392 goto err_out_disable;
5395 pci_set_master(pdev);
5397 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
5398 if (bp->pm_cap == 0) {
5399 printk(KERN_ERR PFX "Cannot find power management capability, "
5402 goto err_out_release;
5405 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
5406 if (bp->pcix_cap == 0) {
5407 printk(KERN_ERR PFX "Cannot find PCIX capability, aborting.\n");
5409 goto err_out_release;
5412 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
5413 bp->flags |= USING_DAC_FLAG;
5414 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
5415 printk(KERN_ERR PFX "pci_set_consistent_dma_mask "
5416 "failed, aborting.\n");
5418 goto err_out_release;
5421 else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
5422 printk(KERN_ERR PFX "System does not support DMA, aborting.\n");
5424 goto err_out_release;
5430 spin_lock_init(&bp->phy_lock);
5431 spin_lock_init(&bp->tx_lock);
5432 INIT_WORK(&bp->reset_task, bnx2_reset_task, bp);
5434 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
5435 mem_len = MB_GET_CID_ADDR(17);
5436 dev->mem_end = dev->mem_start + mem_len;
5437 dev->irq = pdev->irq;
5439 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
5442 printk(KERN_ERR PFX "Cannot map register space, aborting.\n");
5444 goto err_out_release;
5447 /* Configure byte swap and enable write to the reg_window registers.
5448 * Rely on the CPU to do target byte swapping on big endian systems.
5449 * The chip's target access swapping will not swap all accesses. */
5451 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
5452 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
5453 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
5455 bnx2_set_power_state(bp, PCI_D0);
5457 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
5459 /* Get bus information. */
5460 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
5461 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
5464 bp->flags |= PCIX_FLAG;
5466 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
5468 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
5470 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
5471 bp->bus_speed_mhz = 133;
5474 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
5475 bp->bus_speed_mhz = 100;
5478 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
5479 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
5480 bp->bus_speed_mhz = 66;
5483 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
5484 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
5485 bp->bus_speed_mhz = 50;
5488 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
5489 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
5490 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
5491 bp->bus_speed_mhz = 33;
5496 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
5497 bp->bus_speed_mhz = 66;
5499 bp->bus_speed_mhz = 33;
5502 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
5503 bp->flags |= PCI_32BIT_FLAG;
5505 /* 5706A0 may falsely detect SERR and PERR. */
5506 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5507 reg = REG_RD(bp, PCI_COMMAND);
5508 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
5509 REG_WR(bp, PCI_COMMAND, reg);
5511 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
5512 !(bp->flags & PCIX_FLAG)) {
5514 printk(KERN_ERR PFX "5706 A1 can only be used in a PCIX bus, "
5519 bnx2_init_nvram(bp);
5521 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
5523 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
5524 BNX2_SHM_HDR_SIGNATURE_SIG)
5525 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0);
5527 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
5529 /* Get the permanent MAC address. First we need to make sure the
5530 * firmware is actually running. */
5532 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
5534 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
5535 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
5536 printk(KERN_ERR PFX "Firmware not running, aborting.\n");
5541 bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
5543 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
5544 bp->mac_addr[0] = (u8) (reg >> 8);
5545 bp->mac_addr[1] = (u8) reg;
5547 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
5548 bp->mac_addr[2] = (u8) (reg >> 24);
5549 bp->mac_addr[3] = (u8) (reg >> 16);
5550 bp->mac_addr[4] = (u8) (reg >> 8);
5551 bp->mac_addr[5] = (u8) reg;
5553 bp->tx_ring_size = MAX_TX_DESC_CNT;
5554 bnx2_set_rx_ring_size(bp, 100);
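/* Each RX buffer reserves room for the hardware l2_fhdr plus a 2-byte
 * pad (the pad keeps the IP header aligned).
 */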
5558 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
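/* Default interrupt coalescing: completion-count trip points and
 * tick (microsecond) limits for TX and RX.
 */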
5560 bp->tx_quick_cons_trip_int = 20;
5561 bp->tx_quick_cons_trip = 20;
5562 bp->tx_ticks_int = 80;
5565 bp->rx_quick_cons_trip_int = 6;
5566 bp->rx_quick_cons_trip = 6;
5567 bp->rx_ticks_int = 18;
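/* Collect the statistics block roughly once a second; the value is
 * masked down to the bits the hardware uses.
 */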
5570 bp->stats_ticks = 1000000 & 0xffff00;
5572 bp->timer_interval = HZ;
5573 bp->current_interval = HZ;
5577 /* Disable WOL support if we are running on a SERDES chip. */
5578 if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT) {
5579 bp->phy_flags |= PHY_SERDES_FLAG;
5580 bp->flags |= NO_WOL_FLAG;
5581 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
5583 reg = REG_RD_IND(bp, bp->shmem_base +
5584 BNX2_SHARED_HW_CFG_CONFIG);
5585 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
5586 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
5590 if (CHIP_NUM(bp) == CHIP_NUM_5708)
5591 bp->flags |= NO_WOL_FLAG;
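/* 5706 A0: use the same coalescing values in and out of interrupt
 * context rather than separate *_int settings.
 */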
5593 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5594 bp->tx_quick_cons_trip_int =
5595 bp->tx_quick_cons_trip;
5596 bp->tx_ticks_int = bp->tx_ticks;
5597 bp->rx_quick_cons_trip_int =
5598 bp->rx_quick_cons_trip;
5599 bp->rx_ticks_int = bp->rx_ticks;
5600 bp->comp_prod_trip_int = bp->comp_prod_trip;
5601 bp->com_ticks_int = bp->com_ticks;
5602 bp->cmd_ticks_int = bp->cmd_ticks;
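/* Default link configuration: autonegotiate speed and flow control.
 * SerDes ports honor an NVRAM default of 1 Gb/s full duplex when one
 * is configured.
 */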
5605 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
5606 bp->req_line_speed = 0;
5607 if (bp->phy_flags & PHY_SERDES_FLAG) {
5608 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
5610 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
5611 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
5612 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
5614 bp->req_line_speed = bp->line_speed = SPEED_1000;
5615 bp->req_duplex = DUPLEX_FULL;
5619 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
5622 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
5624 init_timer(&bp->timer);
5625 bp->timer.expires = RUN_AT(bp->timer_interval);
5626 bp->timer.data = (unsigned long) bp;
5627 bp->timer.function = bnx2_timer;
5633 iounmap(bp->regview);
5638 pci_release_regions(pdev);
5641 pci_disable_device(pdev);
5642 pci_set_drvdata(pdev, NULL);
5648 static int __devinit
5649 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
5651 static int version_printed = 0;
5652 struct net_device *dev = NULL;
5656 if (version_printed++ == 0)
5657 printk(KERN_INFO "%s", version);
5659 /* dev is zeroed by alloc_etherdev */
5660 dev = alloc_etherdev(sizeof(*bp));
5665 rc = bnx2_init_board(pdev, dev);
5671 dev->open = bnx2_open;
5672 dev->hard_start_xmit = bnx2_start_xmit;
5673 dev->stop = bnx2_close;
5674 dev->get_stats = bnx2_get_stats;
5675 dev->set_multicast_list = bnx2_set_rx_mode;
5676 dev->do_ioctl = bnx2_ioctl;
5677 dev->set_mac_address = bnx2_change_mac_addr;
5678 dev->change_mtu = bnx2_change_mtu;
5679 dev->tx_timeout = bnx2_tx_timeout;
5680 dev->watchdog_timeo = TX_TIMEOUT;
5682 dev->vlan_rx_register = bnx2_vlan_rx_register;
5683 dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
5685 dev->poll = bnx2_poll;
5686 dev->ethtool_ops = &bnx2_ethtool_ops;
5689 bp = netdev_priv(dev);
5691 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
5692 dev->poll_controller = poll_bnx2;
5695 if ((rc = register_netdev(dev))) {
5696 printk(KERN_ERR PFX "Cannot register net device\n");
5698 iounmap(bp->regview);
5699 pci_release_regions(pdev);
5700 pci_disable_device(pdev);
5701 pci_set_drvdata(pdev, NULL);
5706 pci_set_drvdata(pdev, dev);
5708 memcpy(dev->dev_addr, bp->mac_addr, 6);
5709 memcpy(dev->perm_addr, bp->mac_addr, 6);
5710 bp->name = board_info[ent->driver_data].name;
5711 printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
5715 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
5716 ((CHIP_ID(bp) & 0x0ff0) >> 4),
5717 ((bp->flags & PCIX_FLAG) ? "-X" : ""),
5718 ((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
5723 printk("node addr ");
5724 for (i = 0; i < 6; i++)
5725 printk("%2.2x", dev->dev_addr[i]);
5728 dev->features |= NETIF_F_SG;
5729 if (bp->flags & USING_DAC_FLAG)
5730 dev->features |= NETIF_F_HIGHDMA;
5731 dev->features |= NETIF_F_IP_CSUM;
5733 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
5736 dev->features |= NETIF_F_TSO;
5739 netif_carrier_off(bp->dev);
5744 static void __devexit
5745 bnx2_remove_one(struct pci_dev *pdev)
5747 struct net_device *dev = pci_get_drvdata(pdev);
5748 struct bnx2 *bp = netdev_priv(dev);
5750 flush_scheduled_work();
5752 unregister_netdev(dev);
5755 iounmap(bp->regview);
5758 pci_release_regions(pdev);
5759 pci_disable_device(pdev);
5760 pci_set_drvdata(pdev, NULL);
5764 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
5766 struct net_device *dev = pci_get_drvdata(pdev);
5767 struct bnx2 *bp = netdev_priv(dev);
5770 if (!netif_running(dev))
5773 flush_scheduled_work();
5774 bnx2_netif_stop(bp);
5775 netif_device_detach(dev);
5776 del_timer_sync(&bp->timer);
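/* Choose a firmware reset code that reflects whether wake-on-LAN should
 * remain armed, then put the chip into the requested PCI power state.
 */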
5777 if (bp->flags & NO_WOL_FLAG)
5778 reset_code = BNX2_DRV_MSG_CODE_UNLOAD;
5780 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5782 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5783 bnx2_reset_chip(bp, reset_code);
5785 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
5790 bnx2_resume(struct pci_dev *pdev)
5792 struct net_device *dev = pci_get_drvdata(pdev);
5793 struct bnx2 *bp = netdev_priv(dev);
5795 if (!netif_running(dev))
5798 bnx2_set_power_state(bp, PCI_D0);
5799 netif_device_attach(dev);
5801 bnx2_netif_start(bp);
5805 static struct pci_driver bnx2_pci_driver = {
5806 .name = DRV_MODULE_NAME,
5807 .id_table = bnx2_pci_tbl,
5808 .probe = bnx2_init_one,
5809 .remove = __devexit_p(bnx2_remove_one),
5810 .suspend = bnx2_suspend,
5811 .resume = bnx2_resume,
5814 static int __init bnx2_init(void)
5816 return pci_module_init(&bnx2_pci_driver);
5819 static void __exit bnx2_cleanup(void)
5821 pci_unregister_driver(&bnx2_pci_driver);
5824 module_init(bnx2_init);
5825 module_exit(bnx2_cleanup);