]> git.karo-electronics.de Git - karo-tx-linux.git/blob - drivers/net/bnx2.c
0614ca0b15fea6be4354e53c9eb3cc74fe260f0e
[karo-tx-linux.git] / drivers / net / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2010 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
16
17 #include <linux/kernel.h>
18 #include <linux/timer.h>
19 #include <linux/errno.h>
20 #include <linux/ioport.h>
21 #include <linux/slab.h>
22 #include <linux/vmalloc.h>
23 #include <linux/interrupt.h>
24 #include <linux/pci.h>
25 #include <linux/init.h>
26 #include <linux/netdevice.h>
27 #include <linux/etherdevice.h>
28 #include <linux/skbuff.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/bitops.h>
31 #include <asm/io.h>
32 #include <asm/irq.h>
33 #include <linux/delay.h>
34 #include <asm/byteorder.h>
35 #include <asm/page.h>
36 #include <linux/time.h>
37 #include <linux/ethtool.h>
38 #include <linux/mii.h>
39 #include <linux/if_vlan.h>
40 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
41 #define BCM_VLAN 1
42 #endif
43 #include <net/ip.h>
44 #include <net/tcp.h>
45 #include <net/checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/prefetch.h>
49 #include <linux/cache.h>
50 #include <linux/firmware.h>
51 #include <linux/log2.h>
52
53 #if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
54 #define BCM_CNIC 1
55 #include "cnic_if.h"
56 #endif
57 #include "bnx2.h"
58 #include "bnx2_fw.h"
59
#define DRV_MODULE_NAME         "bnx2"
#define DRV_MODULE_VERSION      "2.0.15"
#define DRV_MODULE_RELDATE      "May 4, 2010"
/* Firmware image names, loaded at run time via request_firmware();
 * the 06 images serve 5706/5708 family chips, the 09 images 5709/5716.
 */
#define FW_MIPS_FILE_06         "bnx2/bnx2-mips-06-5.0.0.j6.fw"
#define FW_RV2P_FILE_06         "bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
#define FW_MIPS_FILE_09         "bnx2/bnx2-mips-09-5.0.0.j15.fw"
#define FW_RV2P_FILE_09_Ax      "bnx2/bnx2-rv2p-09ax-5.0.0.j10.fw"
#define FW_RV2P_FILE_09         "bnx2/bnx2-rv2p-09-5.0.0.j10.fw"

/* Absolute jiffies value 'x' jiffies from now. */
#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_MIPS_FILE_06);
MODULE_FIRMWARE(FW_RV2P_FILE_06);
MODULE_FIRMWARE(FW_MIPS_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);

/* Module parameter: non-zero forces legacy INTx instead of MSI. */
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
91
/* Board identifiers.  The numeric values are used as the driver_data
 * field in bnx2_pci_tbl below and as the index into board_info[], so
 * the enum order must stay in sync with that table.
 */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
	BCM5716S,
} board_t;
105
/* indexed by board_t, above */
static struct {
	char *name;	/* human-readable board name reported at probe */
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
	};
122
/* PCI IDs handled by this driver.  Entries with specific HP subsystem
 * IDs must come before the PCI_ANY_ID wildcard entries for the same
 * device ID, because the PCI core matches the table in order.
 * The last field is the board_t value stored in driver_data.
 */
static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	/* 0x163b/0x163c: BCM5716/5716S (no PCI_DEVICE_ID_* constant) */
	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
	{ 0, }
};
148
/* NVRAM device table, matched against the flash strapping read from
 * the chip at init time.  The first five hex words of each entry are
 * raw NVRAM controller configuration values (strapping match plus
 * command encodings -- see struct flash_spec in bnx2.h for field
 * names); the remaining fields are access flags, page geometry, byte
 * address mask, total size and a human-readable name.  "Expansion"
 * entries are placeholders for strapping codes with no known part.
 */
static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS          (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS       (BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
237
/* The 5709 family has a single, fixed NVRAM configuration, so it is
 * described by this one entry instead of a flash_table lookup.
 */
static const struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};
246
MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);

/* Forward declarations; definitions appear later in this file. */
static void bnx2_init_napi(struct bnx2 *bp);
static void bnx2_del_napi(struct bnx2 *bp);
251
252 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
253 {
254         u32 diff;
255
256         smp_mb();
257
258         /* The ring uses 256 indices for 255 entries, one of them
259          * needs to be skipped.
260          */
261         diff = txr->tx_prod - txr->tx_cons;
262         if (unlikely(diff >= TX_DESC_CNT)) {
263                 diff &= 0xffff;
264                 if (diff == TX_DESC_CNT)
265                         diff = MAX_TX_DESC_CNT;
266         }
267         return (bp->tx_ring_size - diff);
268 }
269
270 static u32
271 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
272 {
273         u32 val;
274
275         spin_lock_bh(&bp->indirect_lock);
276         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
277         val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
278         spin_unlock_bh(&bp->indirect_lock);
279         return val;
280 }
281
/* Write @val to device register @offset through the indirect PCI
 * config window.  The address write must precede the data write;
 * indirect_lock keeps the two-step sequence atomic with respect to
 * other indirect accessors.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}
290
/* Write a 32-bit word into the firmware shared memory region;
 * @offset is relative to bp->shmem_base.
 */
static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}
296
297 static u32
298 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
299 {
300         return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
301 }
302
/* Write @val into on-chip context memory at @cid_addr + @offset.
 *
 * On 5709 the write goes through the CTX_CTX_DATA/CTX_CTX_CTRL
 * register pair and we poll up to 5 times (5us apart) for the
 * WRITE_REQ bit to clear; a timeout is silently ignored.  Older
 * chips use the simpler CTX_DATA_ADR/CTX_DATA window.  Both paths
 * share the indirect register interface, hence indirect_lock.
 */
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		for (i = 0; i < 5; i++) {
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}
326
327 #ifdef BCM_CNIC
328 static int
329 bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
330 {
331         struct bnx2 *bp = netdev_priv(dev);
332         struct drv_ctl_io *io = &info->data.io;
333
334         switch (info->cmd) {
335         case DRV_CTL_IO_WR_CMD:
336                 bnx2_reg_wr_ind(bp, io->offset, io->data);
337                 break;
338         case DRV_CTL_IO_RD_CMD:
339                 io->data = bnx2_reg_rd_ind(bp, io->offset);
340                 break;
341         case DRV_CTL_CTX_WR_CMD:
342                 bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
343                 break;
344         default:
345                 return -EINVAL;
346         }
347         return 0;
348 }
349
/* Fill in the cnic irq/status-block description from the current
 * bnx2 interrupt configuration.
 *
 * With MSI-X, cnic gets the vector at index bp->irq_nvecs (presumably
 * reserved for cnic by the MSI-X setup elsewhere in this driver --
 * confirm there) plus its own MSI-X-aligned status block, and the
 * bnx2 NAPI handler does not poll for cnic (cnic_present = 0).
 * Without MSI-X, cnic shares vector 0 and the base status block, and
 * bnx2's NAPI path handles cnic events (cnic_present = 1); cnic_tag
 * is seeded from last_status_idx so polling starts in sync.
 */
static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	int sb_id;

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_present = 0;
		sb_id = bp->irq_nvecs;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_tag = bnapi->last_status_idx;
		bnapi->cnic_present = 1;
		sb_id = 0;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}

	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
	/* Status blocks are packed BNX2_SBLK_MSIX_ALIGN_SIZE apart. */
	cp->irq_arr[0].status_blk = (void *)
		((unsigned long) bnapi->status_blk.msi +
		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
	cp->irq_arr[0].status_blk_num = sb_id;
	cp->num_irq = 1;
}
376
/* cnic callback: attach a cnic instance to this device.
 *
 * Returns -EINVAL if @ops is NULL and -EBUSY if a cnic instance is
 * already registered.  cnic_ops is published with
 * rcu_assign_pointer() so the interrupt/NAPI readers see a fully
 * initialized pointer.
 *
 * NOTE(review): unlike bnx2_unregister_cnic() this path does not take
 * bp->cnic_lock; presumably the cnic driver serializes its own
 * register/unregister calls -- confirm against cnic.c.
 */
static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			      void *data)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (cp->drv_state & CNIC_DRV_STATE_REGD)
		return -EBUSY;

	bp->cnic_data = data;
	rcu_assign_pointer(bp->cnic_ops, ops);

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2_setup_cnic_irq_info(bp);

	return 0;
}
399
/* cnic callback: detach the cnic instance.
 *
 * The driver state and cnic_ops pointer are cleared under cnic_lock,
 * then synchronize_rcu() waits for any in-flight RCU readers of
 * bp->cnic_ops (the interrupt/NAPI paths) to finish before the
 * caller may free the ops structure.  Always returns 0.
 */
static int bnx2_unregister_cnic(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_lock);
	cp->drv_state = 0;
	bnapi->cnic_present = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_lock);
	synchronize_rcu();
	return 0;
}
414
415 struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
416 {
417         struct bnx2 *bp = netdev_priv(dev);
418         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
419
420         cp->drv_owner = THIS_MODULE;
421         cp->chip_id = bp->chip_id;
422         cp->pdev = bp->pdev;
423         cp->io_base = bp->regview;
424         cp->drv_ctl = bnx2_drv_ctl;
425         cp->drv_register_cnic = bnx2_register_cnic;
426         cp->drv_unregister_cnic = bnx2_unregister_cnic;
427
428         return cp;
429 }
430 EXPORT_SYMBOL(bnx2_cnic_probe);
431
432 static void
433 bnx2_cnic_stop(struct bnx2 *bp)
434 {
435         struct cnic_ops *c_ops;
436         struct cnic_ctl_info info;
437
438         mutex_lock(&bp->cnic_lock);
439         c_ops = bp->cnic_ops;
440         if (c_ops) {
441                 info.cmd = CNIC_CTL_STOP_CMD;
442                 c_ops->cnic_ctl(bp->cnic_data, &info);
443         }
444         mutex_unlock(&bp->cnic_lock);
445 }
446
/* Notify an attached cnic driver (if any) that the device has
 * (re)started.  In the shared-vector (non-MSI-X) case, cnic_tag is
 * resynced with the status block index first so the NAPI poll path
 * does not replay stale cnic events.  cnic_lock keeps cnic_ops
 * stable across the callback.
 */
static void
bnx2_cnic_start(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = bp->cnic_ops;
	if (c_ops) {
		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

			bnapi->cnic_tag = bnapi->last_status_idx;
		}
		info.cmd = CNIC_CTL_START_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}
466
467 #else
468
/* CONFIG_CNIC disabled: no-op stub. */
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}
473
/* CONFIG_CNIC disabled: no-op stub. */
static void
bnx2_cnic_start(struct bnx2 *bp)
{
}
478
479 #endif
480
/* Read PHY register @reg over the EMAC MDIO interface into *@val.
 *
 * Returns 0 on success; returns -EBUSY and sets *val to 0 if the
 * transaction has not completed after 50 polls 10us apart (~500us).
 *
 * If hardware auto-polling of the PHY is enabled it would contend
 * with a manual MDIO transaction, so it is switched off first and
 * restored afterwards (the read-back after each mode write appears
 * to serve as a posted-write flush before the 40us settle delay --
 * confirm against chip documentation).
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Start the read: PHY address, register number, READ command. */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll for START_BUSY to clear. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			/* Re-read and keep only the 16-bit data field. */
			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
537
/* Write @val to PHY register @reg over the EMAC MDIO interface.
 *
 * Returns 0 on success or -EBUSY if the transaction has not
 * completed after 50 polls 10us apart (~500us).  Auto-polling is
 * suspended and restored around the access exactly as in
 * bnx2_read_phy() above.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Start the write: PHY address, register, data, WRITE command. */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll for START_BUSY to clear. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
586
587 static void
588 bnx2_disable_int(struct bnx2 *bp)
589 {
590         int i;
591         struct bnx2_napi *bnapi;
592
593         for (i = 0; i < bp->irq_nvecs; i++) {
594                 bnapi = &bp->bnx2_napi[i];
595                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
596                        BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
597         }
598         REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
599 }
600
/* Re-enable interrupts on all vectors.  Per vector: the first write
 * acks events up to last_status_idx with interrupts still masked,
 * the second write unmasks at the same index.  The final COAL_NOW
 * command asks the host coalescing block to run immediately so any
 * events that arrived while masked generate an interrupt (presumed
 * from the COAL_NOW name -- confirm against chip documentation).
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bnapi->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bnapi->last_status_idx);
	}
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
621
/* Disable interrupts and wait for in-flight handlers to complete.
 *
 * intr_sem is incremented first so bnx2_netif_start() (which
 * decrements it) knows interrupts are logically off; it is bumped
 * even when the device is down so stop/start pairs stay balanced.
 * synchronize_irq() then waits out any handler already running on
 * each vector.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);
	if (!netif_running(bp->dev))
		return;

	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}
635
636 static void
637 bnx2_napi_disable(struct bnx2 *bp)
638 {
639         int i;
640
641         for (i = 0; i < bp->irq_nvecs; i++)
642                 napi_disable(&bp->bnx2_napi[i].napi);
643 }
644
645 static void
646 bnx2_napi_enable(struct bnx2 *bp)
647 {
648         int i;
649
650         for (i = 0; i < bp->irq_nvecs; i++)
651                 napi_enable(&bp->bnx2_napi[i].napi);
652 }
653
/* Quiesce the interface: optionally stop cnic first, stop NAPI
 * polling and the tx queues, then disable interrupts and wait for
 * running handlers (bnx2_disable_int_sync).  Dropping the carrier
 * last keeps the stack's tx watchdog from firing while stopped.
 */
static void
bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
{
	if (stop_cnic)
		bnx2_cnic_stop(bp);
	if (netif_running(bp->dev)) {
		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
	}
	bnx2_disable_int_sync(bp);
	netif_carrier_off(bp->dev);	/* prevent tx timeout */
}
666
/* Undo bnx2_netif_stop().  intr_sem balances the atomic_inc() in
 * bnx2_disable_int_sync(); only the call that brings it back to zero
 * restarts the interface, so nested stop/start pairs compose.
 * Carrier is restored from the cached link state under phy_lock,
 * and cnic is (optionally) restarted last, after interrupts are on.
 */
static void
bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_tx_wake_all_queues(bp->dev);
			spin_lock_bh(&bp->phy_lock);
			if (bp->link_up)
				netif_carrier_on(bp->dev);
			spin_unlock_bh(&bp->phy_lock);
			bnx2_napi_enable(bp);
			bnx2_enable_int(bp);
			if (start_cnic)
				bnx2_cnic_start(bp);
		}
	}
}
684
685 static void
686 bnx2_free_tx_mem(struct bnx2 *bp)
687 {
688         int i;
689
690         for (i = 0; i < bp->num_tx_rings; i++) {
691                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
692                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
693
694                 if (txr->tx_desc_ring) {
695                         pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
696                                             txr->tx_desc_ring,
697                                             txr->tx_desc_mapping);
698                         txr->tx_desc_ring = NULL;
699                 }
700                 kfree(txr->tx_buf_ring);
701                 txr->tx_buf_ring = NULL;
702         }
703 }
704
/* Release all rx memory: per-ring DMA descriptor pages for both the
 * normal rx ring and the page ring, plus the vmalloc'ed software
 * buffer-tracking arrays.  Safe on partially-allocated state; every
 * freed pointer is NULLed so a later free is harmless.
 */
static void
bnx2_free_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		for (j = 0; j < bp->rx_max_ring; j++) {
			if (rxr->rx_desc_ring[j])
				pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
						    rxr->rx_desc_ring[j],
						    rxr->rx_desc_mapping[j]);
			rxr->rx_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_buf_ring);
		rxr->rx_buf_ring = NULL;

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			if (rxr->rx_pg_desc_ring[j])
				pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
						    rxr->rx_pg_desc_ring[j],
						    rxr->rx_pg_desc_mapping[j]);
			rxr->rx_pg_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_pg_ring);
		rxr->rx_pg_ring = NULL;
	}
}
736
737 static int
738 bnx2_alloc_tx_mem(struct bnx2 *bp)
739 {
740         int i;
741
742         for (i = 0; i < bp->num_tx_rings; i++) {
743                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
744                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
745
746                 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
747                 if (txr->tx_buf_ring == NULL)
748                         return -ENOMEM;
749
750                 txr->tx_desc_ring =
751                         pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
752                                              &txr->tx_desc_mapping);
753                 if (txr->tx_desc_ring == NULL)
754                         return -ENOMEM;
755         }
756         return 0;
757 }
758
/* Allocate all rx memory: the vmalloc'ed software buffer arrays and
 * the DMA-coherent descriptor pages for the normal rx ring and (when
 * rx_pg_ring_size is non-zero) the page ring.  Returns 0 or -ENOMEM;
 * partial allocations are left in place for the caller to release
 * via bnx2_free_mem() (see the alloc_mem_err path in bnx2_alloc_mem).
 */
static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		/* vmalloc: the array can be large (rx_max_ring pages). */
		rxr->rx_buf_ring =
			vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
		if (rxr->rx_buf_ring == NULL)
			return -ENOMEM;

		memset(rxr->rx_buf_ring, 0,
		       SW_RXBD_RING_SIZE * bp->rx_max_ring);

		for (j = 0; j < bp->rx_max_ring; j++) {
			rxr->rx_desc_ring[j] =
				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
						     &rxr->rx_desc_mapping[j]);
			if (rxr->rx_desc_ring[j] == NULL)
				return -ENOMEM;

		}

		if (bp->rx_pg_ring_size) {
			rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
						  bp->rx_max_pg_ring);
			if (rxr->rx_pg_ring == NULL)
				return -ENOMEM;

			memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
			       bp->rx_max_pg_ring);
		}

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			rxr->rx_pg_desc_ring[j] =
				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
						&rxr->rx_pg_desc_mapping[j]);
			if (rxr->rx_pg_desc_ring[j] == NULL)
				return -ENOMEM;

		}
	}
	return 0;
}
807
/* Release everything bnx2_alloc_mem() acquired: tx/rx rings, the
 * 5709 context pages, and the combined status + statistics block.
 * Safe on partially-allocated state, so it doubles as the error
 * cleanup path for bnx2_alloc_mem().
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	bnx2_free_tx_mem(bp);
	bnx2_free_rx_mem(bp);

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	/* stats_blk shares the status block allocation; freeing one
	 * frees both (see bnx2_alloc_mem).
	 */
	if (bnapi->status_blk.msi) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bnapi->status_blk.msi,
				    bp->status_blk_mapping);
		bnapi->status_blk.msi = NULL;
		bp->stats_blk = NULL;
	}
}
833
/* Allocate all DMA-coherent memory for the device: a combined
 * status + statistics block, the 5709 context memory pages, and the
 * rx/tx rings.  On any failure, everything already allocated is
 * released via bnx2_free_mem() and -ENOMEM is returned.
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size, err;
	struct bnx2_napi *bnapi;
	void *status_blk;

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		/* One aligned status block per possible MSI-X vector. */
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					  &bp->status_blk_mapping);
	if (status_blk == NULL)
		goto alloc_mem_err;

	memset(status_blk, 0, bp->status_stats_size);

	/* Vector 0 uses the start of the block as its status block. */
	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			/* Per-vector blocks are packed ALIGN_SIZE apart. */
			sblk = (void *) (status_blk +
					 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			/* Vector id field OR'ed into INT_ACK_CMD writes. */
			bnapi->int_num = i << 24;
		}
	}

	/* Statistics block lives right after the status block(s). */
	bp->stats_blk = status_blk + status_blk_size;

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709 keeps 8KB (0x2000) of context memory in host pages. */
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
910
/* Mirror the driver's current link state into shared memory so the
 * bootcode/management firmware sees the same status as the driver.
 * No-op when a remote (firmware-managed) PHY owns the link.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			/* Read BMSR twice: MII status bits are latched, so
			 * the second read reflects the current state.
			 */
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			/* Link without completed autoneg means the speed was
			 * found by parallel detection.
			 */
			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}
969
970 static char *
971 bnx2_xceiver_str(struct bnx2 *bp)
972 {
973         return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
974                 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
975                  "Copper"));
976 }
977
978 static void
979 bnx2_report_link(struct bnx2 *bp)
980 {
981         if (bp->link_up) {
982                 netif_carrier_on(bp->dev);
983                 netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
984                             bnx2_xceiver_str(bp),
985                             bp->line_speed,
986                             bp->duplex == DUPLEX_FULL ? "full" : "half");
987
988                 if (bp->flow_ctrl) {
989                         if (bp->flow_ctrl & FLOW_CTRL_RX) {
990                                 pr_cont(", receive ");
991                                 if (bp->flow_ctrl & FLOW_CTRL_TX)
992                                         pr_cont("& transmit ");
993                         }
994                         else {
995                                 pr_cont(", transmit ");
996                         }
997                         pr_cont("flow control ON");
998                 }
999                 pr_cont("\n");
1000         } else {
1001                 netif_carrier_off(bp->dev);
1002                 netdev_err(bp->dev, "NIC %s Link is Down\n",
1003                            bnx2_xceiver_str(bp));
1004         }
1005
1006         bnx2_report_fw_link(bp);
1007 }
1008
/* Resolve bp->flow_ctrl (rx/tx pause) for the current link.  When
 * pause autonegotiation is not fully enabled, the requested settings
 * are applied verbatim (full duplex only); otherwise the result is
 * derived from the local and link-partner advertisements.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	/* Pause resolution only applies when both speed and flow-control
	 * autoneg are enabled.
	 */
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	/* Pause is only defined for full-duplex links. */
	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	/* The 5708 SerDes PHY reports the resolved pause result directly
	 * in its 1000X status register.
	 */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	/* Map the 1000BASE-X pause bits onto the copper PAUSE_CAP/ASYM
	 * encoding so one resolution table below handles both media.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				/* Asymmetric: partner pauses us only. */
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			/* Asymmetric: we pause the partner only. */
			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
1084
/* Fill in bp->line_speed and bp->duplex for a 5709 SerDes PHY that
 * has link.  Always returns 0.
 */
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	/* GP_TOP_AN_STATUS1 lives in the GP_STATUS register block; the
	 * default COMBO_IEEEB0 block is restored after the read.
	 */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	/* Forced speed/duplex: report the requested settings as-is. */
	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
		case MII_BNX2_GP_TOP_AN_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_1G:
		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
			bp->line_speed = SPEED_1000;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}
1123
1124 static int
1125 bnx2_5708s_linkup(struct bnx2 *bp)
1126 {
1127         u32 val;
1128
1129         bp->link_up = 1;
1130         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1131         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
1132                 case BCM5708S_1000X_STAT1_SPEED_10:
1133                         bp->line_speed = SPEED_10;
1134                         break;
1135                 case BCM5708S_1000X_STAT1_SPEED_100:
1136                         bp->line_speed = SPEED_100;
1137                         break;
1138                 case BCM5708S_1000X_STAT1_SPEED_1G:
1139                         bp->line_speed = SPEED_1000;
1140                         break;
1141                 case BCM5708S_1000X_STAT1_SPEED_2G5:
1142                         bp->line_speed = SPEED_2500;
1143                         break;
1144         }
1145         if (val & BCM5708S_1000X_STAT1_FD)
1146                 bp->duplex = DUPLEX_FULL;
1147         else
1148                 bp->duplex = DUPLEX_HALF;
1149
1150         return 0;
1151 }
1152
1153 static int
1154 bnx2_5706s_linkup(struct bnx2 *bp)
1155 {
1156         u32 bmcr, local_adv, remote_adv, common;
1157
1158         bp->link_up = 1;
1159         bp->line_speed = SPEED_1000;
1160
1161         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1162         if (bmcr & BMCR_FULLDPLX) {
1163                 bp->duplex = DUPLEX_FULL;
1164         }
1165         else {
1166                 bp->duplex = DUPLEX_HALF;
1167         }
1168
1169         if (!(bmcr & BMCR_ANENABLE)) {
1170                 return 0;
1171         }
1172
1173         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1174         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1175
1176         common = local_adv & remote_adv;
1177         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1178
1179                 if (common & ADVERTISE_1000XFULL) {
1180                         bp->duplex = DUPLEX_FULL;
1181                 }
1182                 else {
1183                         bp->duplex = DUPLEX_HALF;
1184                 }
1185         }
1186
1187         return 0;
1188 }
1189
/* Fill in bp->line_speed and bp->duplex for a copper PHY with link.
 * With autoneg, the result is the highest common denominator of the
 * 1000BASE-T and 10/100 advertisement registers; without autoneg the
 * forced BMCR settings are reported.  Always returns 0.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		/* Check 1000BASE-T ability first. */
		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		/* The partner's 1000BASE-T ability bits in STAT1000 sit
		 * two bit positions above our CTRL1000 advertisement bits,
		 * hence the shift before masking.
		 */
		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			/* No 1G: fall back to the 10/100 advertisements. */
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				/* No common ability at all: treat as down. */
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		/* Autoneg disabled: BMCR holds the forced speed/duplex. */
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
1255
/* Program the L2 context type word for one rx ring (cid).  On 5709
 * this also encodes the rx BD low/high water marks that control when
 * pause frames are generated (only meaningful when tx flow control
 * is enabled).
 */
static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	/* NOTE(review): 0x02 << 8 is an undocumented magic field here;
	 * presumably part of the context type encoding - confirm against
	 * Broadcom documentation.
	 */
	val |= 0x02 << 8;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 lo_water, hi_water;

		/* Pausing only makes sense when we can send tx pause. */
		if (bp->flow_ctrl & FLOW_CTRL_TX)
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
		else
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
		/* The watermarks must fit inside the rx ring. */
		if (lo_water >= bp->rx_ring_size)
			lo_water = 0;

		hi_water = min_t(int, bp->rx_ring_size / 4, lo_water + 16);

		if (hi_water <= lo_water)
			lo_water = 0;

		/* Hardware fields are expressed in scaled units. */
		hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
		lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;

		if (hi_water > 0xf)
			hi_water = 0xf;		/* clamp to field maximum */
		else if (hi_water == 0)
			lo_water = 0;
		val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
	}
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}
1291
1292 static void
1293 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1294 {
1295         int i;
1296         u32 cid;
1297
1298         for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1299                 if (i == 1)
1300                         cid = RX_RSS_CID;
1301                 bnx2_init_rx_context(bp, cid);
1302         }
1303 }
1304
/* Program the EMAC for the current link parameters: tx lengths,
 * port mode (MII/GMII/2.5G), duplex, and rx/tx pause enables.  Also
 * acks the EMAC link-change interrupt; on 5709 the rx contexts are
 * reprogrammed because their pause watermarks depend on bp->flow_ctrl.
 */
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* TX_LENGTHS (slot time/IPG); 0x26ff is presumably required for
	 * 1G half duplex - values from the vendor, confirm against docs.
	 */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* Only chips newer than the 5706 have a
				 * dedicated 10M MII mode.
				 */
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		/* No link: default to GMII. */
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_all_rx_contexts(bp);
}
1372
1373 static void
1374 bnx2_enable_bmsr1(struct bnx2 *bp)
1375 {
1376         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1377             (CHIP_NUM(bp) == CHIP_NUM_5709))
1378                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1379                                MII_BNX2_BLK_ADDR_GP_STATUS);
1380 }
1381
1382 static void
1383 bnx2_disable_bmsr1(struct bnx2 *bp)
1384 {
1385         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1386             (CHIP_NUM(bp) == CHIP_NUM_5709))
1387                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1388                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1389 }
1390
/* Ensure 2.5G is advertised in the PHY's UP1 register (and in
 * bp->advertising when autonegotiating).  Returns 1 if UP1 already
 * had the bit set, 0 if it was just enabled or if the PHY is not
 * 2.5G capable.
 */
static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	/* On 5709 the UP1 register is in the OVER1G block; the default
	 * block is restored below.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1419
/* Clear the 2.5G advertisement bit in the PHY's UP1 register if it
 * is set.  Returns 1 if the bit was set and has just been cleared,
 * 0 otherwise (including when the PHY is not 2.5G capable).
 */
static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	/* On 5709 the UP1 register is in the OVER1G block; the default
	 * block is restored below.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1445
/* Force the SerDes PHY to 2.5G, bypassing autonegotiation.  The
 * mechanism is chip specific: 5709 uses the SERDES_DIG_MISC1 force
 * bits, 5708 a vendor bit in BMCR.  Other chips (or non-2.5G PHYs)
 * are left untouched.
 */
static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	/* bmcr is only used when the chip-specific read succeeded. */
	u32 uninitialized_var(bmcr);
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		/* Set the forced-2.5G bits in the SERDES_DIG block, then
		 * restore the default block before touching BMCR.
		 */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
			val |= MII_BNX2_SD_MISC1_FORCE |
				MII_BNX2_SD_MISC1_FORCE_2_5G;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr |= BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (err)
		return;

	/* Forcing a speed requires autoneg off; honor requested duplex. */
	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1489
/* Undo a forced 2.5G setting (mirror of bnx2_enable_forced_2g5())
 * and, when speed autoneg is enabled, restart autonegotiation at 1G.
 */
static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	/* bmcr is only used when the chip-specific read succeeded. */
	u32 uninitialized_var(bmcr);
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		/* Clear the force bit in the SERDES_DIG block, then
		 * restore the default block before touching BMCR.
		 */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (err)
		return;

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1528
/* Poke the 5706 SerDes SERDES_CTL expansion register through the DSP
 * read/write port to force the link down or release that force.
 * NOTE(review): the bit semantics are undocumented here -- with
 * start, bits 4-7 are cleared; otherwise bits 6-7 are set.  Confirm
 * the exact meaning against Broadcom PHY documentation.
 */
static void
bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
{
	u32 val;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
	if (start)
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
	else
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
}
1541
/* Re-evaluate the PHY link state, update bp->link_up / speed /
 * duplex / flow_ctrl, reprogram the MAC, and log any state change.
 * Always returns 0.
 * NOTE(review): appears to require bp->phy_lock held by the caller
 * (it serializes PHY register access) -- confirm at call sites.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback modes the link is considered up unconditionally. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* With a remote PHY, link state comes from the firmware instead. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	/* Read status twice: MII status bits are latched, so the second
	 * read reflects the current state.
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val, an_dbg;

		/* Release a previously forced-down link first. */
		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);

		/* AN_DBG shadow register; read twice, same latched-bit
		 * idiom as BMSR above.
		 */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		/* On 5706 SerDes, override BMSR with the MAC's view of
		 * the link plus the sync status.
		 */
		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Chip-specific decode of negotiated speed/duplex. */
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		/* Link lost while in parallel-detect: go back to full
		 * autonegotiation.
		 */
		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	/* Only log/report when the link state actually changed. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1625
1626 static int
1627 bnx2_reset_phy(struct bnx2 *bp)
1628 {
1629         int i;
1630         u32 reg;
1631
1632         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1633
1634 #define PHY_RESET_MAX_WAIT 100
1635         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1636                 udelay(10);
1637
1638                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1639                 if (!(reg & BMCR_RESET)) {
1640                         udelay(20);
1641                         break;
1642                 }
1643         }
1644         if (i == PHY_RESET_MAX_WAIT) {
1645                 return -EBUSY;
1646         }
1647         return 0;
1648 }
1649
1650 static u32
1651 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1652 {
1653         u32 adv = 0;
1654
1655         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1656                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1657
1658                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1659                         adv = ADVERTISE_1000XPAUSE;
1660                 }
1661                 else {
1662                         adv = ADVERTISE_PAUSE_CAP;
1663                 }
1664         }
1665         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1666                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1667                         adv = ADVERTISE_1000XPSE_ASYM;
1668                 }
1669                 else {
1670                         adv = ADVERTISE_PAUSE_ASYM;
1671                 }
1672         }
1673         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1674                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1675                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1676                 }
1677                 else {
1678                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1679                 }
1680         }
1681         return adv;
1682 }
1683
1684 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1685
/* Program link settings on a remote (firmware-managed) PHY: encode
 * the requested speed/duplex/pause into a SET_LINK argument word and
 * hand it to the bootcode.  Temporarily drops bp->phy_lock around
 * the firmware handshake (hence the sparse annotations).  Always
 * returns 0.
 */
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		/* Autoneg: advertise every enabled speed/duplex. */
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		/* Forced mode: encode exactly one speed/duplex. */
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	/* The firmware handshake is done without the PHY lock held. */
	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}
1744
/* Configure the SerDes PHY according to the requested link parameters.
 * Delegates to the firmware for remote-PHY devices.  For forced speeds
 * the BMCR is programmed directly; for autoneg the advertisement is
 * updated and autonegotiation restarted.  Drops and re-acquires
 * bp->phy_lock around sleeping operations.  Always returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced speed path. */
		u32 new_bmcr;
		int force_link_down = 0;

		/* Toggling 2.5G capability requires forcing the link down
		 * so the partner sees the change.
		 */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				/* NOTE(review): 0x2000 appears to be a speed
				 * select bit used for forced 2.5G on the
				 * 5709 — confirm against chip docs.
				 */
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autoneg path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			/* msleep requires dropping the PHY lock. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1861
/* Fibre speeds to advertise; include 2.5G only on 2.5G-capable PHYs.
 * NOTE: expands using a local variable named "bp" at the expansion site.
 */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

/* All copper speeds in ethtool ADVERTISED_* encoding. */
#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

/* All 10/100 speeds in MII ADVERTISE_* (register) encoding. */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

/* All 1000 speeds in MII 1000BASE-T control register encoding. */
#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1876
1877 static void
1878 bnx2_set_default_remote_link(struct bnx2 *bp)
1879 {
1880         u32 link;
1881
1882         if (bp->phy_port == PORT_TP)
1883                 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1884         else
1885                 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1886
1887         if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1888                 bp->req_line_speed = 0;
1889                 bp->autoneg |= AUTONEG_SPEED;
1890                 bp->advertising = ADVERTISED_Autoneg;
1891                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1892                         bp->advertising |= ADVERTISED_10baseT_Half;
1893                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1894                         bp->advertising |= ADVERTISED_10baseT_Full;
1895                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1896                         bp->advertising |= ADVERTISED_100baseT_Half;
1897                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1898                         bp->advertising |= ADVERTISED_100baseT_Full;
1899                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1900                         bp->advertising |= ADVERTISED_1000baseT_Full;
1901                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1902                         bp->advertising |= ADVERTISED_2500baseX_Full;
1903         } else {
1904                 bp->autoneg = 0;
1905                 bp->advertising = 0;
1906                 bp->req_duplex = DUPLEX_FULL;
1907                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1908                         bp->req_line_speed = SPEED_10;
1909                         if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1910                                 bp->req_duplex = DUPLEX_HALF;
1911                 }
1912                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1913                         bp->req_line_speed = SPEED_100;
1914                         if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1915                                 bp->req_duplex = DUPLEX_HALF;
1916                 }
1917                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1918                         bp->req_line_speed = SPEED_1000;
1919                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1920                         bp->req_line_speed = SPEED_2500;
1921         }
1922 }
1923
1924 static void
1925 bnx2_set_default_link(struct bnx2 *bp)
1926 {
1927         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1928                 bnx2_set_default_remote_link(bp);
1929                 return;
1930         }
1931
1932         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1933         bp->req_line_speed = 0;
1934         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1935                 u32 reg;
1936
1937                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1938
1939                 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1940                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1941                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1942                         bp->autoneg = 0;
1943                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1944                         bp->req_duplex = DUPLEX_FULL;
1945                 }
1946         } else
1947                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1948 }
1949
1950 static void
1951 bnx2_send_heart_beat(struct bnx2 *bp)
1952 {
1953         u32 msg;
1954         u32 addr;
1955
1956         spin_lock(&bp->indirect_lock);
1957         msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1958         addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1959         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1960         REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1961         spin_unlock(&bp->indirect_lock);
1962 }
1963
/* Process a link event reported by the firmware-managed (remote) PHY:
 * decode speed/duplex/flow-control from the shared-memory link status
 * word, switch the default link config if the PHY port changed, and
 * update the MAC.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	/* The firmware piggybacks a heartbeat request on the status word. */
	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* Each *HALF case sets half duplex and then deliberately
		 * falls through to the matching speed assignment.
		 */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		/* Flow control: use the forced setting unless both speed
		 * and flow control were autonegotiated.
		 */
		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* Re-apply defaults if the active media type changed. */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
2040
2041 static int
2042 bnx2_set_remote_link(struct bnx2 *bp)
2043 {
2044         u32 evt_code;
2045
2046         evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2047         switch (evt_code) {
2048                 case BNX2_FW_EVT_CODE_LINK_EVENT:
2049                         bnx2_remote_phy_event(bp);
2050                         break;
2051                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2052                 default:
2053                         bnx2_send_heart_beat(bp);
2054                         break;
2055         }
2056         return 0;
2057 }
2058
/* Configure the copper PHY: update the autoneg advertisement and
 * restart autoneg, or force speed/duplex through the BMCR.  Drops and
 * re-acquires bp->phy_lock around the sleeping link-down wait.  Always
 * returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Keep only the speed and pause bits we manage so the
		 * comparison below sees a normalized value.
		 */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Restart autoneg only when the advertisement actually
		 * changed or autoneg is currently disabled.
		 */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR link status is latched low; read twice to get the
		 * current state.
		 */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			/* msleep requires dropping the PHY lock. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
2157
2158 static int
2159 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2160 __releases(&bp->phy_lock)
2161 __acquires(&bp->phy_lock)
2162 {
2163         if (bp->loopback == MAC_LOOPBACK)
2164                 return 0;
2165
2166         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2167                 return (bnx2_setup_serdes_phy(bp, port));
2168         }
2169         else {
2170                 return (bnx2_setup_copper_phy(bp));
2171         }
2172 }
2173
/* One-time init of the 5709 SerDes PHY: point the MII register offsets
 * at this PHY's layout, then walk the block-addressed register pages to
 * force fiber mode and enable next-page/CL73 BAM autoneg.  Always
 * returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	/* Standard MII registers sit at an offset of 0x10 on this PHY. */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Select the AN MMD through the address-expansion register. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	/* Force fiber mode and disable media auto-detection. */
	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Advertise 2.5G only when the PHY is 2.5G-capable. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Enable BAM and T2 next-page exchange. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the block address pointing at the IEEE combo block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
2223
/* One-time init of the 5708 SerDes PHY: fiber mode, PLL early-lock
 * detect, optional 2.5G advertisement, plus board/revision-specific TX
 * amplitude tuning from shared hardware config.  Always returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	/* Fiber mode with auto-detection enabled. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	/* Advertise 2.5G when the PHY supports it. */
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	/* Early 5708 revisions need a TX amplitude workaround. */
	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Apply a board-specific TX control value, but only on backplane
	 * designs per the shared hardware config.
	 */
	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
2281
/* One-time init of the 5706 SerDes PHY, including jumbo-frame (extended
 * packet length) tuning keyed off the current MTU.  The 0x18/0x1c
 * registers are vendor shadow registers — values are from Broadcom.
 * Always returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	/* NOTE(review): magic GP_HW_CTL0 value for the 5706 — presumably a
	 * chip-specific strap/clock setting; confirm against chip docs.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear extended packet length bit for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
2319
/* One-time init of the copper PHY: optional CRC and early-DAC
 * workarounds, MTU-dependent extended packet length setting, and
 * ethernet@wirespeed enable.  The 0x10/0x15/0x17/0x18 registers are
 * vendor shadow/expansion registers — values are from Broadcom.
 * Always returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	/* CRC workaround: a fixed sequence of expansion-register writes. */
	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	/* Disable early DAC via DSP expand register 8, bit 8. */
	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear extended packet length bit for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
2371
2372
2373 static int
2374 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2375 __releases(&bp->phy_lock)
2376 __acquires(&bp->phy_lock)
2377 {
2378         u32 val;
2379         int rc = 0;
2380
2381         bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2382         bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2383
2384         bp->mii_bmcr = MII_BMCR;
2385         bp->mii_bmsr = MII_BMSR;
2386         bp->mii_bmsr1 = MII_BMSR;
2387         bp->mii_adv = MII_ADVERTISE;
2388         bp->mii_lpa = MII_LPA;
2389
2390         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2391
2392         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2393                 goto setup_phy;
2394
2395         bnx2_read_phy(bp, MII_PHYSID1, &val);
2396         bp->phy_id = val << 16;
2397         bnx2_read_phy(bp, MII_PHYSID2, &val);
2398         bp->phy_id |= val & 0xffff;
2399
2400         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2401                 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2402                         rc = bnx2_init_5706s_phy(bp, reset_phy);
2403                 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2404                         rc = bnx2_init_5708s_phy(bp, reset_phy);
2405                 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2406                         rc = bnx2_init_5709s_phy(bp, reset_phy);
2407         }
2408         else {
2409                 rc = bnx2_init_copper_phy(bp, reset_phy);
2410         }
2411
2412 setup_phy:
2413         if (!rc)
2414                 rc = bnx2_setup_phy(bp, bp->phy_port);
2415
2416         return rc;
2417 }
2418
2419 static int
2420 bnx2_set_mac_loopback(struct bnx2 *bp)
2421 {
2422         u32 mac_mode;
2423
2424         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2425         mac_mode &= ~BNX2_EMAC_MODE_PORT;
2426         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2427         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2428         bp->link_up = 1;
2429         return 0;
2430 }
2431
2432 static int bnx2_test_link(struct bnx2 *);
2433
2434 static int
2435 bnx2_set_phy_loopback(struct bnx2 *bp)
2436 {
2437         u32 mac_mode;
2438         int rc, i;
2439
2440         spin_lock_bh(&bp->phy_lock);
2441         rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2442                             BMCR_SPEED1000);
2443         spin_unlock_bh(&bp->phy_lock);
2444         if (rc)
2445                 return rc;
2446
2447         for (i = 0; i < 10; i++) {
2448                 if (bnx2_test_link(bp) == 0)
2449                         break;
2450                 msleep(100);
2451         }
2452
2453         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2454         mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2455                       BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2456                       BNX2_EMAC_MODE_25G_MODE);
2457
2458         mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2459         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2460         bp->link_up = 1;
2461         return 0;
2462 }
2463
/* Post a command to the firmware driver mailbox and optionally wait for
 * the acknowledgement.  Returns 0 on success (or when no ack was
 * requested), -EBUSY on ack timeout, -EIO when the firmware reports a
 * non-OK status.  Sleeps; must not be called with spinlocks held.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
	int i;
	u32 val;

	/* Tag the message with a fresh sequence number so the ack can be
	 * matched below.
	 */
	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

	if (!ack)
		return 0;

	/* wait for an acknowledgement. */
	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = bnx2_shmem_rd(bp, BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 commands need no firmware status check. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			pr_err("fw sync timeout, reset code = %x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2508
/* Initialize the 5709 context memory via the host page table.
 *
 * Starts the on-chip context memory init, then programs one host page
 * table entry per context block and polls for each write request to be
 * consumed.  Returns 0 on success, -EBUSY on a hardware timeout, or
 * -ENOMEM if a context block was never allocated.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;	/* host page size encoding */
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	/* MEM_INIT self-clears when the chip finishes initializing. */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		if (bp->ctx_blk[i])
			memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
		else
			return -ENOMEM;

		/* Point page table entry i at block i's DMA address,
		 * low 32 bits in DATA0 (with VALID), high bits in DATA1.
		 */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* Poll until the hardware consumes the write request. */
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2556
/* Zero out the on-chip context memory for all 96 connection IDs
 * (5706/5708 family; the 5709 uses host memory instead, see
 * bnx2_init_5709_context()).
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			/* 5706 A0 quirk: CIDs with bit 3 set map to a
			 * different physical CID.
			 */
			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
		}
	}
}
2599
2600 static int
2601 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2602 {
2603         u16 *good_mbuf;
2604         u32 good_mbuf_cnt;
2605         u32 val;
2606
2607         good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2608         if (good_mbuf == NULL) {
2609                 pr_err("Failed to allocate memory in %s\n", __func__);
2610                 return -ENOMEM;
2611         }
2612
2613         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2614                 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2615
2616         good_mbuf_cnt = 0;
2617
2618         /* Allocate a bunch of mbufs and save the good ones in an array. */
2619         val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2620         while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2621                 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2622                                 BNX2_RBUF_COMMAND_ALLOC_REQ);
2623
2624                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2625
2626                 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2627
2628                 /* The addresses with Bit 9 set are bad memory blocks. */
2629                 if (!(val & (1 << 9))) {
2630                         good_mbuf[good_mbuf_cnt] = (u16) val;
2631                         good_mbuf_cnt++;
2632                 }
2633
2634                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2635         }
2636
2637         /* Free the good ones back to the mbuf pool thus discarding
2638          * all the bad ones. */
2639         while (good_mbuf_cnt) {
2640                 good_mbuf_cnt--;
2641
2642                 val = good_mbuf[good_mbuf_cnt];
2643                 val = (val << 9) | val | 1;
2644
2645                 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2646         }
2647         kfree(good_mbuf);
2648         return 0;
2649 }
2650
2651 static void
2652 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2653 {
2654         u32 val;
2655
2656         val = (mac_addr[0] << 8) | mac_addr[1];
2657
2658         REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2659
2660         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2661                 (mac_addr[4] << 8) | mac_addr[5];
2662
2663         REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2664 }
2665
/* Allocate and DMA-map a page for RX page-ring slot @index and program
 * the matching rx_bd with the mapping address.
 * Returns 0 on success, -ENOMEM on allocation failure, -EIO if the DMA
 * mapping fails.
 */
static inline int
bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
	dma_addr_t mapping;
	struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
	struct rx_bd *rxbd =
		&rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
	struct page *page = alloc_page(GFP_ATOMIC);

	if (!page)
		return -ENOMEM;
	mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(bp->pdev, mapping)) {
		__free_page(page);
		return -EIO;
	}

	rx_pg->page = page;
	dma_unmap_addr_set(rx_pg, mapping, mapping);
	/* The descriptor carries the 64-bit DMA address split in two. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	return 0;
}
2690
2691 static void
2692 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2693 {
2694         struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2695         struct page *page = rx_pg->page;
2696
2697         if (!page)
2698                 return;
2699
2700         pci_unmap_page(bp->pdev, dma_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2701                        PCI_DMA_FROMDEVICE);
2702
2703         __free_page(page);
2704         rx_pg->page = NULL;
2705 }
2706
/* Allocate and map a fresh RX skb for ring slot @index, point the
 * matching rx_bd at it, and advance rx_prod_bseq by the buffer size as
 * required by the hardware's byte-sequence accounting.
 * Returns 0 on success, -ENOMEM or -EIO on failure.
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Align skb->data to the hardware's required RX alignment. */
	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(bp->pdev, mapping)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	rx_buf->skb = skb;
	/* The l2_fhdr the chip DMAs sits at the start of the buffer. */
	rx_buf->desc = (struct l2_fhdr *) skb->data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
2742
2743 static int
2744 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2745 {
2746         struct status_block *sblk = bnapi->status_blk.msi;
2747         u32 new_link_state, old_link_state;
2748         int is_set = 1;
2749
2750         new_link_state = sblk->status_attn_bits & event;
2751         old_link_state = sblk->status_attn_bits_ack & event;
2752         if (new_link_state != old_link_state) {
2753                 if (new_link_state)
2754                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2755                 else
2756                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2757         } else
2758                 is_set = 0;
2759
2760         return is_set;
2761 }
2762
/* Service PHY-related attention events under the phy_lock: link-state
 * changes, and timer-abort events used by remote-PHY configurations.
 */
static void
bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	spin_lock(&bp->phy_lock);

	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
		bnx2_set_link(bp);
	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);

	spin_unlock(&bp->phy_lock);

}
2776
2777 static inline u16
2778 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2779 {
2780         u16 cons;
2781
2782         /* Tell compiler that status block fields can change. */
2783         barrier();
2784         cons = *bnapi->hw_tx_cons_ptr;
2785         barrier();
2786         if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2787                 cons++;
2788         return cons;
2789 }
2790
/* Reclaim completed TX descriptors for this NAPI instance's ring.
 *
 * Walks from the driver's consumer index to the hardware's, unmapping
 * and freeing each completed skb, then wakes the TX queue if it was
 * stopped and enough descriptors are free again.  Returns the number
 * of packets reclaimed (bounded by @budget).
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_pkt = 0, index;
	struct netdev_queue *txq;

	/* One TX queue per NAPI instance. */
	index = (bnapi - bp->bnx2_napi);
	txq = netdev_get_tx_queue(bp->dev, index);

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = txr->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		/* partial BD completions possible with TSO packets */
		if (tx_buf->is_gso) {
			u16 last_idx, last_ring_idx;

			/* Only reclaim once ALL BDs of the GSO packet
			 * have completed; otherwise stop and retry on the
			 * next pass.
			 */
			last_idx = sw_cons + tx_buf->nr_frags + 1;
			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		pci_unmap_single(bp->pdev, dma_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = tx_buf->nr_frags;

		/* Unmap each fragment's page in the following BDs. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				dma_unmap_addr(
					&txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		dev_kfree_skb(skb);
		tx_pkt++;
		if (tx_pkt == budget)
			break;

		/* Pick up completions that arrived while we worked. */
		if (hw_cons == sw_cons)
			hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	txr->hw_tx_cons = hw_cons;
	txr->tx_cons = sw_cons;

	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		/* Re-check under the tx lock to avoid racing xmit. */
		if ((netif_tx_queue_stopped(txq)) &&
		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	return tx_pkt;
}
2881
/* Recycle @count RX page-ring entries from the consumer side back to
 * the producer side without allocating new pages.
 *
 * When @skb is non-NULL, the last page in its frags array (the one a
 * replacement could not be allocated for) is first detached back into
 * the page ring and the skb itself is freed.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
			struct sk_buff *skb, int count)
{
	struct sw_pg *cons_rx_pg, *prod_rx_pg;
	struct rx_bd *cons_bd, *prod_bd;
	int i;
	u16 hw_prod, prod;
	u16 cons = rxr->rx_pg_cons;

	cons_rx_pg = &rxr->rx_pg_ring[cons];

	/* The caller was unable to allocate a new page to replace the
	 * last one in the frags array, so we need to recycle that page
	 * and then free the skb.
	 */
	if (skb) {
		struct page *page;
		struct skb_shared_info *shinfo;

		shinfo = skb_shinfo(skb);
		shinfo->nr_frags--;
		page = shinfo->frags[shinfo->nr_frags].page;
		shinfo->frags[shinfo->nr_frags].page = NULL;

		cons_rx_pg->page = page;
		dev_kfree_skb(skb);
	}

	hw_prod = rxr->rx_pg_prod;

	for (i = 0; i < count; i++) {
		prod = RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &rxr->rx_pg_ring[prod];
		cons_rx_pg = &rxr->rx_pg_ring[cons];
		cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
		prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		/* Move the page and its DMA mapping from the consumer
		 * slot to the producer slot when they differ.
		 */
		if (prod != cons) {
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			dma_unmap_addr_set(prod_rx_pg, mapping,
				dma_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
		hw_prod = NEXT_RX_BD(hw_prod);
	}
	rxr->rx_pg_prod = hw_prod;
	rxr->rx_pg_cons = cons;
}
2937
/* Recycle an RX skb from consumer slot @cons to producer slot @prod
 * without reallocating: resync the header region for the device and
 * move the skb, its DMA mapping, and the descriptor address across.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
		  struct sk_buff *skb, u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];

	/* Hand the header/copy-threshold region back to the device;
	 * only that much was synced to the CPU in bnx2_rx_int().
	 */
	pci_dma_sync_single_for_device(bp->pdev,
		dma_unmap_addr(cons_rx_buf, mapping),
		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;
	prod_rx_buf->desc = (struct l2_fhdr *) skb->data;

	/* Same slot: mapping and descriptor are already correct. */
	if (cons == prod)
		return;

	dma_unmap_addr_set(prod_rx_buf, mapping,
			dma_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2968
/* Complete reception of one packet into @skb.
 *
 * Replenishes the RX slot first; on failure the skb (and any page-ring
 * pages) are recycled and the error returned.  For split/jumbo frames
 * (@hdr_len non-zero) the rest of the frame is attached as page
 * fragments taken from the page ring.  @ring_idx packs
 * (cons << 16) | prod.  Returns 0 on success or a negative errno.
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;

	err = bnx2_alloc_rx_skb(bp, rxr, prod);
	if (unlikely(err)) {
		bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
		if (hdr_len) {
			/* raw_len still includes the 4-byte CRC. */
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
		}
		return err;
	}

	skb_reserve(skb, BNX2_RX_OFFSET);
	pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);

	if (hdr_len == 0) {
		/* Non-split frame: everything is in the linear buffer. */
		skb_put(skb, len);
		return 0;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct sw_pg *rx_pg;
		u16 pg_cons = rxr->rx_pg_cons;
		u16 pg_prod = rxr->rx_pg_prod;

		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			dma_addr_t mapping_old;

			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			if (unlikely(frag_len <= 4)) {
				/* Only CRC bytes remain: trim them from
				 * the skb and recycle the unused pages.
				 */
				unsigned int tail = 4 - frag_len;

				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					frag->size -= tail;
					skb->data_len -= tail;
					skb->truesize -= tail;
				}
				return 0;
			}
			rx_pg = &rxr->rx_pg_ring[pg_cons];

			/* Don't unmap yet.  If we're unable to allocate a new
			 * page, we need to recycle the page and the DMA addr.
			 */
			mapping_old = dma_unmap_addr(rx_pg, mapping);
			if (i == pages - 1)
				frag_len -= 4;	/* strip CRC from last frag */

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			err = bnx2_alloc_rx_page(bp, rxr,
						 RX_PG_RING_IDX(pg_prod));
			if (unlikely(err)) {
				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
							pages - i);
				return err;
			}

			pci_unmap_page(bp->pdev, mapping_old,
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += frag_len;
			skb->len += frag_len;

			pg_prod = NEXT_RX_BD(pg_prod);
			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
		}
		rxr->rx_pg_prod = pg_prod;
		rxr->rx_pg_cons = pg_cons;
	}
	return 0;
}
3067
3068 static inline u16
3069 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3070 {
3071         u16 cons;
3072
3073         /* Tell compiler that status block fields can change. */
3074         barrier();
3075         cons = *bnapi->hw_rx_cons_ptr;
3076         barrier();
3077         if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
3078                 cons++;
3079         return cons;
3080 }
3081
/* NAPI RX handler: process up to @budget received packets.
 *
 * Small frames (<= rx_copy_thresh) are copied into a fresh skb so the
 * original ring buffer can be recycled in place; larger frames go
 * through bnx2_rx_skb(), which replenishes the ring.  The chip's
 * consumer/producer indices are written back at the end.  Returns the
 * number of packets processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0, pg_ring_used = 0;

	hw_cons = bnx2_get_hw_rx_cons(bnapi);
	sw_cons = rxr->rx_cons;
	sw_prod = rxr->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len, hdr_len;
		u32 status;
		struct sw_bd *rx_buf, *next_rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u16 vtag = 0;
		int hw_vlan __maybe_unused = 0;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;
		prefetchw(skb);

		/* Prefetch the next descriptor's header to hide latency. */
		next_rx_buf =
			&rxr->rx_buf_ring[RX_RING_IDX(NEXT_RX_BD(sw_cons))];
		prefetch(next_rx_buf->desc);

		rx_buf->skb = NULL;

		dma_addr = dma_unmap_addr(rx_buf, mapping);

		/* Sync only the header region; that is all we read here. */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
			PCI_DMA_FROMDEVICE);

		rx_hdr = rx_buf->desc;
		len = rx_hdr->l2_fhdr_pkt_len;
		status = rx_hdr->l2_fhdr_status;

		hdr_len = 0;
		if (status & L2_FHDR_STATUS_SPLIT) {
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

		/* Drop frames with hardware-detected errors; recycle the
		 * buffer (and pages) instead of passing them up.
		 */
		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
				       L2_FHDR_ERRORS_PHY_DECODE |
				       L2_FHDR_ERRORS_ALIGNMENT |
				       L2_FHDR_ERRORS_TOO_SHORT |
				       L2_FHDR_ERRORS_GIANT_FRAME))) {

			bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
					  sw_ring_prod);
			if (pg_ring_used) {
				int pages;

				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;

				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
			}
			goto next_rx;
		}

		len -= 4;	/* strip hardware-appended CRC */

		if (len <= bp->rx_copy_thresh) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 6);
			if (new_skb == NULL) {
				bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
						  sw_ring_prod);
				goto next_rx;
			}

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb,
							 BNX2_RX_OFFSET - 6,
				      new_skb->data, len + 6);
			skb_reserve(new_skb, 6);
			skb_put(new_skb, len);

			bnx2_reuse_rx_skb(bp, rxr, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		} else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
			   dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
			goto next_rx;

		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
			vtag = rx_hdr->l2_fhdr_vlan_tag;
#ifdef BCM_VLAN
			if (bp->vlgrp)
				hw_vlan = 1;
			else
#endif
			{
				/* No VLAN group registered: re-insert the
				 * tag into the packet so the stack sees it.
				 */
				struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
					__skb_push(skb, 4);

				memmove(ve, skb->data + 4, ETH_ALEN * 2);
				ve->h_vlan_proto = htons(ETH_P_8021Q);
				ve->h_vlan_TCI = htons(vtag);
				len += 4;
			}
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversized frames unless they are VLAN-tagged. */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			/* Trust hardware checksum only when no xsum
			 * errors were flagged.
			 */
			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);

#ifdef BCM_VLAN
		if (hw_vlan)
			vlan_gro_receive(&bnapi->napi, bp->vlgrp, vtag, skb);
		else
#endif
			napi_gro_receive(&bnapi->napi, skb);

		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bnapi);
			rmb();
		}
	}
	rxr->rx_cons = sw_cons;
	rxr->rx_prod = sw_prod;

	if (pg_ring_used)
		REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);

	REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);

	/* Ensure the index writes reach the chip before returning. */
	mmiowb();

	return rx_pkt;

}
3262
3263 /* MSI ISR - The only difference between this and the INTx ISR
3264  * is that the MSI interrupt is always serviced.
3265  */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);
	/* Mask further interrupts until NAPI polling completes. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3285
3286 static irqreturn_t
3287 bnx2_msi_1shot(int irq, void *dev_instance)
3288 {
3289         struct bnx2_napi *bnapi = dev_instance;
3290         struct bnx2 *bp = bnapi->bp;
3291
3292         prefetch(bnapi->status_blk.msi);
3293
3294         /* Return here if interrupt is disabled. */
3295         if (unlikely(atomic_read(&bp->intr_sem) != 0))
3296                 return IRQ_HANDLED;
3297
3298         napi_schedule(&bnapi->napi);
3299
3300         return IRQ_HANDLED;
3301 }
3302
/* INTx (potentially shared line) interrupt handler.  Unlike MSI, the
 * line may belong to another device, so first verify that our status
 * block index has advanced before claiming the interrupt.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
	struct status_block *sblk = bnapi->status_blk.msi;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	/* Mask further interrupts until NAPI polling completes. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	if (napi_schedule_prep(&bnapi->napi)) {
		bnapi->last_status_idx = sblk->status_idx;
		__napi_schedule(&bnapi->napi);
	}

	return IRQ_HANDLED;
}
3341
3342 static inline int
3343 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3344 {
3345         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3346         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3347
3348         if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3349             (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3350                 return 1;
3351         return 0;
3352 }
3353
3354 #define STATUS_ATTN_EVENTS      (STATUS_ATTN_BITS_LINK_STATE | \
3355                                  STATUS_ATTN_BITS_TIMER_ABORT)
3356
3357 static inline int
3358 bnx2_has_work(struct bnx2_napi *bnapi)
3359 {
3360         struct status_block *sblk = bnapi->status_blk.msi;
3361
3362         if (bnx2_has_fast_work(bnapi))
3363                 return 1;
3364
3365 #ifdef BCM_CNIC
3366         if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3367                 return 1;
3368 #endif
3369
3370         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3371             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3372                 return 1;
3373
3374         return 0;
3375 }
3376
/* Workaround for a missed MSI: if work is pending but the status index
 * has not advanced since the last idle check, assume the MSI was lost.
 * Toggle the MSI enable bit and call the MSI handler directly to
 * recover.  Runs from the driver's periodic timer context.
 */
static void
bnx2_chk_missed_msi(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	u32 msi_ctrl;

	if (bnx2_has_work(bnapi)) {
		msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
			return;

		/* Same index as at the previous check => no MSI arrived
		 * in between even though work was pending.
		 */
		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
			       ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
		}
	}

	bp->idle_chk_status_idx = bnapi->last_status_idx;
}
3398
#ifdef BCM_CNIC
/* Forward the status block to the registered cnic driver's handler,
 * if any, and remember the tag it returns for bnx2_has_work().
 */
static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct cnic_ops *c_ops;

	if (!bnapi->cnic_present)
		return;

	/* cnic_ops is RCU-protected; it may be unregistered concurrently. */
	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
						      bnapi->status_blk.msi);
	rcu_read_unlock();
}
#endif
3415
/* Service pending link/timer attention events: the attention bits
 * differing from their acknowledged copies indicates an un-handled
 * event.
 */
static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp, bnapi);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		/* Read back flushes the posted write. */
		REG_RD(bp, BNX2_HC_COMMAND);
	}
}
3435
3436 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3437                           int work_done, int budget)
3438 {
3439         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3440         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3441
3442         if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3443                 bnx2_tx_int(bp, bnapi, 0);
3444
3445         if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3446                 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3447
3448         return work_done;
3449 }
3450
/* NAPI poll for MSI-X vectors: only fast-path RX/TX work.  Link and
 * cnic events are serviced in bnx2_poll().
 */
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk.msix;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
		if (unlikely(work_done >= budget))
			break;

		bnapi->last_status_idx = sblk->status_idx;
		/* status idx must be read before checking for more work. */
		rmb();
		if (likely(!bnx2_has_fast_work(bnapi))) {

			/* Done: ack up to last_status_idx and re-arm
			 * this vector's interrupt.
			 */
			napi_complete(napi);
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}
	return work_done;
}
3477
/* Main NAPI poll (vector 0 / non-MSI-X): handles link attention,
 * RX/TX fast-path work, and cnic events, then re-enables interrupts
 * when no work remains.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk.msi;

	while (1) {
		bnx2_poll_link(bp, bnapi);

		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

#ifdef BCM_CNIC
		bnx2_poll_cnic(bp, bnapi);
#endif

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;

		if (unlikely(work_done >= budget))
			break;

		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			napi_complete(napi);
			/* MSI/MSI-X: one write acks the index and
			 * un-masks the interrupt.
			 */
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				       bnapi->last_status_idx);
				break;
			}
			/* INTx: first ack the index with MASK_INT still
			 * set, then write again without it to re-enable.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			       bnapi->last_status_idx);

			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
3526
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 *
 * Reprogram the chip's RX filtering (promiscuous, multicast hash,
 * unicast match filters, VLAN tag stripping) from the netdev flags
 * and address lists.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	struct netdev_hw_addr *ha;
	int i;

	if (!netif_running(dev))
		return;

	spin_lock_bh(&bp->phy_lock);

	/* Start from the current mode with promiscuous and VLAN-keep
	 * cleared; both are re-derived below.
	 */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Keep VLAN tags only when no vlan group is registered and the
	 * chip supports it.
	 */
	if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: set every bit in the hash filter. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		netdev_for_each_mc_addr(ha, dev) {
			/* Hash on the low byte of the little-endian CRC:
			 * top 3 bits select the register, low 5 the bit.
			 */
			crc = ether_crc_le(ETH_ALEN, ha->addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Too many unicast addresses for perfect filtering: fall back
	 * to promiscuous.
	 */
	if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries into to the match filter list */
		i = 0;
		netdev_for_each_uc_addr(ha, dev) {
			bnx2_set_mac_addr(bp, ha->addr,
					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
			sort_mode |= (1 <<
				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
			i++;
		}

	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Program the sort mode: clear, write the mode, then write it
	 * again with the enable bit set.
	 */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
3619
3620 static int __devinit
3621 check_fw_section(const struct firmware *fw,
3622                  const struct bnx2_fw_file_section *section,
3623                  u32 alignment, bool non_empty)
3624 {
3625         u32 offset = be32_to_cpu(section->offset);
3626         u32 len = be32_to_cpu(section->len);
3627
3628         if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3629                 return -EINVAL;
3630         if ((non_empty && len == 0) || len > fw->size - offset ||
3631             len & (alignment - 1))
3632                 return -EINVAL;
3633         return 0;
3634 }
3635
3636 static int __devinit
3637 check_mips_fw_entry(const struct firmware *fw,
3638                     const struct bnx2_mips_fw_file_entry *entry)
3639 {
3640         if (check_fw_section(fw, &entry->text, 4, true) ||
3641             check_fw_section(fw, &entry->data, 4, false) ||
3642             check_fw_section(fw, &entry->rodata, 4, false))
3643                 return -EINVAL;
3644         return 0;
3645 }
3646
3647 static int __devinit
3648 bnx2_request_firmware(struct bnx2 *bp)
3649 {
3650         const char *mips_fw_file, *rv2p_fw_file;
3651         const struct bnx2_mips_fw_file *mips_fw;
3652         const struct bnx2_rv2p_fw_file *rv2p_fw;
3653         int rc;
3654
3655         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3656                 mips_fw_file = FW_MIPS_FILE_09;
3657                 if ((CHIP_ID(bp) == CHIP_ID_5709_A0) ||
3658                     (CHIP_ID(bp) == CHIP_ID_5709_A1))
3659                         rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3660                 else
3661                         rv2p_fw_file = FW_RV2P_FILE_09;
3662         } else {
3663                 mips_fw_file = FW_MIPS_FILE_06;
3664                 rv2p_fw_file = FW_RV2P_FILE_06;
3665         }
3666
3667         rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3668         if (rc) {
3669                 pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
3670                 return rc;
3671         }
3672
3673         rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3674         if (rc) {
3675                 pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
3676                 return rc;
3677         }
3678         mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3679         rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3680         if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3681             check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3682             check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3683             check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3684             check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3685             check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3686                 pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
3687                 return -EINVAL;
3688         }
3689         if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3690             check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3691             check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3692                 pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
3693                 return -EINVAL;
3694         }
3695
3696         return 0;
3697 }
3698
3699 static u32
3700 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3701 {
3702         switch (idx) {
3703         case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3704                 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3705                 rv2p_code |= RV2P_BD_PAGE_SIZE;
3706                 break;
3707         }
3708         return rv2p_code;
3709 }
3710
/* Load one RV2P processor's firmware.  Each 64-bit instruction is
 * written as an INSTR_HIGH/INSTR_LOW register pair and committed by
 * writing its index to the processor's address/command register.
 * Always returns 0.
 */
static int
load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
{
	u32 rv2p_code_len, file_offset;
	__be32 *rv2p_code;
	int i;
	u32 val, cmd, addr;

	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
	file_offset = be32_to_cpu(fw_entry->rv2p.offset);

	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);

	/* Select the command/address registers for the target processor. */
	if (rv2p_proc == RV2P_PROC1) {
		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC1_ADDR_CMD;
	} else {
		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC2_ADDR_CMD;
	}

	/* Write the image, 8 bytes (one instruction) per iteration. */
	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
		rv2p_code++;

		val = (i / 8) | cmd;
		REG_WR(bp, addr, val);
	}

	/* Apply up to 8 fixups: rewrite the instruction at each fixup
	 * location, patching its low word via rv2p_fw_fixup().
	 */
	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
	for (i = 0; i < 8; i++) {
		u32 loc, code;

		loc = be32_to_cpu(fw_entry->fixup[i]);
		if (loc && ((loc * 4) < rv2p_code_len)) {
			code = be32_to_cpu(*(rv2p_code + loc - 1));
			REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
			code = be32_to_cpu(*(rv2p_code + loc));
			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
			REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);

			val = (loc / 2) | cmd;
			REG_WR(bp, addr, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}

	return 0;
}
3770
3771 static int
3772 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3773             const struct bnx2_mips_fw_file_entry *fw_entry)
3774 {
3775         u32 addr, len, file_offset;
3776         __be32 *data;
3777         u32 offset;
3778         u32 val;
3779
3780         /* Halt the CPU. */
3781         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3782         val |= cpu_reg->mode_value_halt;
3783         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3784         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3785
3786         /* Load the Text area. */
3787         addr = be32_to_cpu(fw_entry->text.addr);
3788         len = be32_to_cpu(fw_entry->text.len);
3789         file_offset = be32_to_cpu(fw_entry->text.offset);
3790         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3791
3792         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3793         if (len) {
3794                 int j;
3795
3796                 for (j = 0; j < (len / 4); j++, offset += 4)
3797                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3798         }
3799
3800         /* Load the Data area. */
3801         addr = be32_to_cpu(fw_entry->data.addr);
3802         len = be32_to_cpu(fw_entry->data.len);
3803         file_offset = be32_to_cpu(fw_entry->data.offset);
3804         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3805
3806         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3807         if (len) {
3808                 int j;
3809
3810                 for (j = 0; j < (len / 4); j++, offset += 4)
3811                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3812         }
3813
3814         /* Load the Read-Only area. */
3815         addr = be32_to_cpu(fw_entry->rodata.addr);
3816         len = be32_to_cpu(fw_entry->rodata.len);
3817         file_offset = be32_to_cpu(fw_entry->rodata.offset);
3818         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3819
3820         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3821         if (len) {
3822                 int j;
3823
3824                 for (j = 0; j < (len / 4); j++, offset += 4)
3825                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3826         }
3827
3828         /* Clear the pre-fetch instruction. */
3829         bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3830
3831         val = be32_to_cpu(fw_entry->start_addr);
3832         bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3833
3834         /* Start the CPU. */
3835         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3836         val &= ~cpu_reg->mode_value_halt;
3837         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3838         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3839
3840         return 0;
3841 }
3842
3843 static int
3844 bnx2_init_cpus(struct bnx2 *bp)
3845 {
3846         const struct bnx2_mips_fw_file *mips_fw =
3847                 (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3848         const struct bnx2_rv2p_fw_file *rv2p_fw =
3849                 (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3850         int rc;
3851
3852         /* Initialize the RV2P processor. */
3853         load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3854         load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3855
3856         /* Initialize the RX Processor. */
3857         rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3858         if (rc)
3859                 goto init_cpu_err;
3860
3861         /* Initialize the TX Processor. */
3862         rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3863         if (rc)
3864                 goto init_cpu_err;
3865
3866         /* Initialize the TX Patch-up Processor. */
3867         rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3868         if (rc)
3869                 goto init_cpu_err;
3870
3871         /* Initialize the Completion Processor. */
3872         rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3873         if (rc)
3874                 goto init_cpu_err;
3875
3876         /* Initialize the Command Processor. */
3877         rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3878
3879 init_cpu_err:
3880         return rc;
3881 }
3882
/* Move the device between PCI power states.  Only D0 and D3hot are
 * supported; entering D3hot with WOL enabled programs the chip to
 * wake on magic packets.  Returns 0 or -EINVAL for other states.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Clear the power-state bits and ack any pending PME. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Ack any received magic/ACPI packet and disable magic
		 * packet matching while awake.
		 */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			autoneg = bp->autoneg;
			advertising = bp->advertising;

			/* On copper, renegotiate down to 10/100 for the
			 * sleep state; settings are restored below.
			 */
			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the management firmware we are suspending. */
		if (!(bp->flags & BNX2_FLAG_NO_WOL))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
				     1, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		/* 5706 A0/A1 only set the D3hot state bits (3) when WOL
		 * is enabled; later revisions always do.  NOTE(review):
		 * presumably a chip erratum on those revs -- confirm
		 * against the errata documentation.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
4020
4021 static int
4022 bnx2_acquire_nvram_lock(struct bnx2 *bp)
4023 {
4024         u32 val;
4025         int j;
4026
4027         /* Request access to the flash interface. */
4028         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4029         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4030                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4031                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4032                         break;
4033
4034                 udelay(5);
4035         }
4036
4037         if (j >= NVRAM_TIMEOUT_COUNT)
4038                 return -EBUSY;
4039
4040         return 0;
4041 }
4042
4043 static int
4044 bnx2_release_nvram_lock(struct bnx2 *bp)
4045 {
4046         int j;
4047         u32 val;
4048
4049         /* Relinquish nvram interface. */
4050         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4051
4052         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4053                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4054                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4055                         break;
4056
4057                 udelay(5);
4058         }
4059
4060         if (j >= NVRAM_TIMEOUT_COUNT)
4061                 return -EBUSY;
4062
4063         return 0;
4064 }
4065
4066
/* Enable NVRAM writes at the PCI interface; for flash parts flagged
 * BNX2_NV_WREN also issue the device's write-enable command and wait
 * for completion.  Returns 0 on success, -EBUSY on timeout.
 */
static int
bnx2_enable_nvram_write(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);

	if (bp->flash_info->flags & BNX2_NV_WREN) {
		int j;

		/* Clear DONE separately, then issue the WREN command. */
		REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
		REG_WR(bp, BNX2_NVM_COMMAND,
		       BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);

		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
			udelay(5);

			val = REG_RD(bp, BNX2_NVM_COMMAND);
			if (val & BNX2_NVM_COMMAND_DONE)
				break;
		}

		if (j >= NVRAM_TIMEOUT_COUNT)
			return -EBUSY;
	}
	return 0;
}
4095
4096 static void
4097 bnx2_disable_nvram_write(struct bnx2 *bp)
4098 {
4099         u32 val;
4100
4101         val = REG_RD(bp, BNX2_MISC_CFG);
4102         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4103 }
4104
4105
4106 static void
4107 bnx2_enable_nvram_access(struct bnx2 *bp)
4108 {
4109         u32 val;
4110
4111         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4112         /* Enable both bits, even on read. */
4113         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4114                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4115 }
4116
4117 static void
4118 bnx2_disable_nvram_access(struct bnx2 *bp)
4119 {
4120         u32 val;
4121
4122         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4123         /* Disable both bits, even after read. */
4124         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4125                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4126                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
4127 }
4128
/* Erase the NVRAM page containing @offset.  A no-op for buffered
 * flash parts.  Returns 0 on success or -EBUSY if the erase does not
 * complete within the timeout.
 */
static int
bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
{
	u32 cmd;
	int j;

	if (bp->flash_info->flags & BNX2_NV_BUFFERED)
		/* Buffered flash, no erase needed */
		return 0;

	/* Build an erase command */
	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
	      BNX2_NVM_COMMAND_DOIT;

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue an erase command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4168
4169 static int
4170 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4171 {
4172         u32 cmd;
4173         int j;
4174
4175         /* Build the command word. */
4176         cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
4177
4178         /* Calculate an offset of a buffered flash, not needed for 5709. */
4179         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4180                 offset = ((offset / bp->flash_info->page_size) <<
4181                            bp->flash_info->page_bits) +
4182                           (offset % bp->flash_info->page_size);
4183         }
4184
4185         /* Need to clear DONE bit separately. */
4186         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4187
4188         /* Address of the NVRAM to read from. */
4189         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4190
4191         /* Issue a read command. */
4192         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4193
4194         /* Wait for completion. */
4195         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4196                 u32 val;
4197
4198                 udelay(5);
4199
4200                 val = REG_RD(bp, BNX2_NVM_COMMAND);
4201                 if (val & BNX2_NVM_COMMAND_DONE) {
4202                         __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
4203                         memcpy(ret_val, &v, 4);
4204                         break;
4205                 }
4206         }
4207         if (j >= NVRAM_TIMEOUT_COUNT)
4208                 return -EBUSY;
4209
4210         return 0;
4211 }
4212
4213
4214 static int
4215 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4216 {
4217         u32 cmd;
4218         __be32 val32;
4219         int j;
4220
4221         /* Build the command word. */
4222         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4223
4224         /* Calculate an offset of a buffered flash, not needed for 5709. */
4225         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4226                 offset = ((offset / bp->flash_info->page_size) <<
4227                           bp->flash_info->page_bits) +
4228                          (offset % bp->flash_info->page_size);
4229         }
4230
4231         /* Need to clear DONE bit separately. */
4232         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4233
4234         memcpy(&val32, val, 4);
4235
4236         /* Write the data. */
4237         REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4238
4239         /* Address of the NVRAM to write to. */
4240         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4241
4242         /* Issue the write command. */
4243         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4244
4245         /* Wait for completion. */
4246         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4247                 udelay(5);
4248
4249                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4250                         break;
4251         }
4252         if (j >= NVRAM_TIMEOUT_COUNT)
4253                 return -EBUSY;
4254
4255         return 0;
4256 }
4257
/* Identify the attached flash/EEPROM part and record it in
 * bp->flash_info, reconfiguring the NVRAM interface if the strapping
 * has not yet been applied.  Also determines bp->flash_size, preferring
 * the size advertised in shared memory over the table default.
 * Returns 0 on success, -ENODEV for an unrecognized part, or the error
 * from acquiring the NVRAM lock.
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	const struct flash_spec *flash;

	/* The 5709 has a single known flash spec; skip table lookup. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	/* Bit 30 of NVM_CFG1 -- presumably a "flash interface already
	 * reconfigured" indicator (matches the comments below); TODO
	 * confirm against the NetXtreme II register spec. */
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strap mask applies -- NOTE(review):
		 * meaning inferred from the mask choice; verify. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Both loops above leave j == entry_count when no table entry
	 * matched the strapping. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		pr_alert("Unknown flash/EEPROM type\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the NVM size advertised in shared hardware config;
	 * fall back to the flash table's total size if it is zero. */
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
4340
4341 static int
4342 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4343                 int buf_size)
4344 {
4345         int rc = 0;
4346         u32 cmd_flags, offset32, len32, extra;
4347
4348         if (buf_size == 0)
4349                 return 0;
4350
4351         /* Request access to the flash interface. */
4352         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4353                 return rc;
4354
4355         /* Enable access to flash interface */
4356         bnx2_enable_nvram_access(bp);
4357
4358         len32 = buf_size;
4359         offset32 = offset;
4360         extra = 0;
4361
4362         cmd_flags = 0;
4363
4364         if (offset32 & 3) {
4365                 u8 buf[4];
4366                 u32 pre_len;
4367
4368                 offset32 &= ~3;
4369                 pre_len = 4 - (offset & 3);
4370
4371                 if (pre_len >= len32) {
4372                         pre_len = len32;
4373                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4374                                     BNX2_NVM_COMMAND_LAST;
4375                 }
4376                 else {
4377                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4378                 }
4379
4380                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4381
4382                 if (rc)
4383                         return rc;
4384
4385                 memcpy(ret_buf, buf + (offset & 3), pre_len);
4386
4387                 offset32 += 4;
4388                 ret_buf += pre_len;
4389                 len32 -= pre_len;
4390         }
4391         if (len32 & 3) {
4392                 extra = 4 - (len32 & 3);
4393                 len32 = (len32 + 4) & ~3;
4394         }
4395
4396         if (len32 == 4) {
4397                 u8 buf[4];
4398
4399                 if (cmd_flags)
4400                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4401                 else
4402                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4403                                     BNX2_NVM_COMMAND_LAST;
4404
4405                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4406
4407                 memcpy(ret_buf, buf, 4 - extra);
4408         }
4409         else if (len32 > 0) {
4410                 u8 buf[4];
4411
4412                 /* Read the first word. */
4413                 if (cmd_flags)
4414                         cmd_flags = 0;
4415                 else
4416                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4417
4418                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4419
4420                 /* Advance to the next dword. */
4421                 offset32 += 4;
4422                 ret_buf += 4;
4423                 len32 -= 4;
4424
4425                 while (len32 > 4 && rc == 0) {
4426                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4427
4428                         /* Advance to the next dword. */
4429                         offset32 += 4;
4430                         ret_buf += 4;
4431                         len32 -= 4;
4432                 }
4433
4434                 if (rc)
4435                         return rc;
4436
4437                 cmd_flags = BNX2_NVM_COMMAND_LAST;
4438                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4439
4440                 memcpy(ret_buf, buf, 4 - extra);
4441         }
4442
4443         /* Disable access to flash interface */
4444         bnx2_disable_nvram_access(bp);
4445
4446         bnx2_release_nvram_lock(bp);
4447
4448         return rc;
4449 }
4450
/* Write @buf_size bytes from @data_buf to NVRAM at @offset.
 * Unaligned start/end are handled by reading the neighboring dwords
 * first and merging them with the new data in an aligned scratch
 * buffer.  For non-buffered flash, each affected page is read back in
 * full, erased, and rewritten with the merged contents.
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	/* Unaligned start: widen down to a dword boundary and fetch the
	 * existing leading bytes so they can be preserved. */
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	/* Unaligned end: widen up to a dword boundary and fetch the
	 * existing trailing bytes. */
	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	/* Merge preserved edge bytes and the caller's data into one
	 * aligned scratch buffer. */
	if (align_start || align_end) {
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
		/* Scratch space for one full flash page.  264 bytes --
		 * presumably the largest page size among the supported
		 * non-buffered parts; TODO confirm against flash_table. */
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	/* Write one flash page per iteration, taking and releasing the
	 * NVRAM lock around each page. */
	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	/* kfree(NULL) is a no-op, so unconditional frees are safe. */
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
4630
4631 static void
4632 bnx2_init_fw_cap(struct bnx2 *bp)
4633 {
4634         u32 val, sig = 0;
4635
4636         bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4637         bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4638
4639         if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4640                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4641
4642         val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4643         if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4644                 return;
4645
4646         if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4647                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4648                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4649         }
4650
4651         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4652             (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4653                 u32 link;
4654
4655                 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4656
4657                 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4658                 if (link & BNX2_LINK_STATUS_SERDES_LINK)
4659                         bp->phy_port = PORT_FIBRE;
4660                 else
4661                         bp->phy_port = PORT_TP;
4662
4663                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4664                        BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4665         }
4666
4667         if (netif_running(bp->dev) && sig)
4668                 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4669 }
4670
/* Point PCI GRC window 2 at the chip's MSI-X vector table and window 3
 * at the MSI-X PBA.  Separate-window mode is selected first --
 * NOTE(review): the per-window address registers appear to depend on
 * this mode being set; keep the write ordering as-is.
 */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

	REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
	REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4679
/* Soft-reset the chip after handshaking with the firmware, then wait
 * for the firmware to reinitialize and re-read its capabilities.
 * @reset_code: BNX2_DRV_MSG_* code sent to the firmware describing the
 * reason for the reset.
 * Returns 0 on success or a negative errno.  May sleep (msleep and
 * firmware handshakes).
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
		      BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709: reset via the MISC command register (read back to
		 * flush the posted write), then re-program the config
		 * window-enable and word-swap bits. */
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		/* Other chips: request a core reset through PCICFG. */
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		/* Reset request/busy bits still set -> reset never finished. */
		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			pr_err("Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		pr_err("Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
	if (rc)
		return rc;

	/* Re-read firmware capabilities; if the remote-PHY port type
	 * changed across the reset, reprogram the default remote link. */
	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_fw_cap(bp);
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		bnx2_setup_msix_tbl(bp);
		/* Prevent MSIX table reads and write from timing out */
		REG_WR(bp, BNX2_MISC_ECO_HW_CTL,
			BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
	}

	return rc;
}
4789
4790 static int
4791 bnx2_init_chip(struct bnx2 *bp)
4792 {
4793         u32 val, mtu;
4794         int rc, i;
4795
4796         /* Make sure the interrupt is not active. */
4797         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4798
4799         val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4800               BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4801 #ifdef __BIG_ENDIAN
4802               BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4803 #endif
4804               BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4805               DMA_READ_CHANS << 12 |
4806               DMA_WRITE_CHANS << 16;
4807
4808         val |= (0x2 << 20) | (1 << 11);
4809
4810         if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4811                 val |= (1 << 23);
4812
4813         if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4814             (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
4815                 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4816
4817         REG_WR(bp, BNX2_DMA_CONFIG, val);
4818
4819         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4820                 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4821                 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4822                 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4823         }
4824
4825         if (bp->flags & BNX2_FLAG_PCIX) {
4826                 u16 val16;
4827
4828                 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4829                                      &val16);
4830                 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4831                                       val16 & ~PCI_X_CMD_ERO);
4832         }
4833
4834         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4835                BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4836                BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4837                BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4838
4839         /* Initialize context mapping and zero out the quick contexts.  The
4840          * context block must have already been enabled. */
4841         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4842                 rc = bnx2_init_5709_context(bp);
4843                 if (rc)
4844                         return rc;
4845         } else
4846                 bnx2_init_context(bp);
4847
4848         if ((rc = bnx2_init_cpus(bp)) != 0)
4849                 return rc;
4850
4851         bnx2_init_nvram(bp);
4852
4853         bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4854
4855         val = REG_RD(bp, BNX2_MQ_CONFIG);
4856         val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4857         val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4858         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4859                 val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4860                 if (CHIP_REV(bp) == CHIP_REV_Ax)
4861                         val |= BNX2_MQ_CONFIG_HALT_DIS;
4862         }
4863
4864         REG_WR(bp, BNX2_MQ_CONFIG, val);
4865
4866         val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4867         REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4868         REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4869
4870         val = (BCM_PAGE_BITS - 8) << 24;
4871         REG_WR(bp, BNX2_RV2P_CONFIG, val);
4872
4873         /* Configure page size. */
4874         val = REG_RD(bp, BNX2_TBDR_CONFIG);
4875         val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4876         val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4877         REG_WR(bp, BNX2_TBDR_CONFIG, val);
4878
4879         val = bp->mac_addr[0] +
4880               (bp->mac_addr[1] << 8) +
4881               (bp->mac_addr[2] << 16) +
4882               bp->mac_addr[3] +
4883               (bp->mac_addr[4] << 8) +
4884               (bp->mac_addr[5] << 16);
4885         REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4886
4887         /* Program the MTU.  Also include 4 bytes for CRC32. */
4888         mtu = bp->dev->mtu;
4889         val = mtu + ETH_HLEN + ETH_FCS_LEN;
4890         if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4891                 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4892         REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4893
4894         if (mtu < 1500)
4895                 mtu = 1500;
4896
4897         bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
4898         bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
4899         bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
4900
4901         memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
4902         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4903                 bp->bnx2_napi[i].last_status_idx = 0;
4904
4905         bp->idle_chk_status_idx = 0xffff;
4906
4907         bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4908
4909         /* Set up how to generate a link change interrupt. */
4910         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4911
4912         REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4913                (u64) bp->status_blk_mapping & 0xffffffff);
4914         REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4915
4916         REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4917                (u64) bp->stats_blk_mapping & 0xffffffff);
4918         REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4919                (u64) bp->stats_blk_mapping >> 32);
4920
4921         REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4922                (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4923
4924         REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4925                (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4926
4927         REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4928                (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4929
4930         REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4931
4932         REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4933
4934         REG_WR(bp, BNX2_HC_COM_TICKS,
4935                (bp->com_ticks_int << 16) | bp->com_ticks);
4936
4937         REG_WR(bp, BNX2_HC_CMD_TICKS,
4938                (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4939
4940         if (bp->flags & BNX2_FLAG_BROKEN_STATS)
4941                 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4942         else
4943                 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4944         REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
4945
4946         if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4947                 val = BNX2_HC_CONFIG_COLLECT_STATS;
4948         else {
4949                 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4950                       BNX2_HC_CONFIG_COLLECT_STATS;
4951         }
4952
4953         if (bp->flags & BNX2_FLAG_USING_MSIX) {
4954                 REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
4955                        BNX2_HC_MSIX_BIT_VECTOR_VAL);
4956
4957                 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
4958         }
4959
4960         if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
4961                 val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
4962
4963         REG_WR(bp, BNX2_HC_CONFIG, val);
4964
4965         for (i = 1; i < bp->irq_nvecs; i++) {
4966                 u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4967                            BNX2_HC_SB_CONFIG_1;
4968
4969                 REG_WR(bp, base,
4970                         BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
4971                         BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
4972                         BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4973
4974                 REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
4975                         (bp->tx_quick_cons_trip_int << 16) |
4976                          bp->tx_quick_cons_trip);
4977
4978                 REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
4979                         (bp->tx_ticks_int << 16) | bp->tx_ticks);
4980
4981                 REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
4982                        (bp->rx_quick_cons_trip_int << 16) |
4983                         bp->rx_quick_cons_trip);
4984
4985                 REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
4986                         (bp->rx_ticks_int << 16) | bp->rx_ticks);
4987         }
4988
4989         /* Clear internal stats counters. */
4990         REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4991
4992         REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
4993
4994         /* Initialize the receive filter. */
4995         bnx2_set_rx_mode(bp->dev);
4996
4997         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4998                 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4999                 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
5000                 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
5001         }
5002         rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
5003                           1, 0);
5004
5005         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
5006         REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
5007
5008         udelay(20);
5009
5010         bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
5011
5012         return rc;
5013 }
5014
5015 static void
5016 bnx2_clear_ring_states(struct bnx2 *bp)
5017 {
5018         struct bnx2_napi *bnapi;
5019         struct bnx2_tx_ring_info *txr;
5020         struct bnx2_rx_ring_info *rxr;
5021         int i;
5022
5023         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5024                 bnapi = &bp->bnx2_napi[i];
5025                 txr = &bnapi->tx_ring;
5026                 rxr = &bnapi->rx_ring;
5027
5028                 txr->tx_cons = 0;
5029                 txr->hw_tx_cons = 0;
5030                 rxr->rx_prod_bseq = 0;
5031                 rxr->rx_prod = 0;
5032                 rxr->rx_cons = 0;
5033                 rxr->rx_pg_prod = 0;
5034                 rxr->rx_pg_cons = 0;
5035         }
5036 }
5037
5038 static void
5039 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5040 {
5041         u32 val, offset0, offset1, offset2, offset3;
5042         u32 cid_addr = GET_CID_ADDR(cid);
5043
5044         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5045                 offset0 = BNX2_L2CTX_TYPE_XI;
5046                 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5047                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5048                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5049         } else {
5050                 offset0 = BNX2_L2CTX_TYPE;
5051                 offset1 = BNX2_L2CTX_CMD_TYPE;
5052                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5053                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5054         }
5055         val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5056         bnx2_ctx_wr(bp, cid_addr, offset0, val);
5057
5058         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5059         bnx2_ctx_wr(bp, cid_addr, offset1, val);
5060
5061         val = (u64) txr->tx_desc_mapping >> 32;
5062         bnx2_ctx_wr(bp, cid_addr, offset2, val);
5063
5064         val = (u64) txr->tx_desc_mapping & 0xffffffff;
5065         bnx2_ctx_wr(bp, cid_addr, offset3, val);
5066 }
5067
5068 static void
5069 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5070 {
5071         struct tx_bd *txbd;
5072         u32 cid = TX_CID;
5073         struct bnx2_napi *bnapi;
5074         struct bnx2_tx_ring_info *txr;
5075
5076         bnapi = &bp->bnx2_napi[ring_num];
5077         txr = &bnapi->tx_ring;
5078
5079         if (ring_num == 0)
5080                 cid = TX_CID;
5081         else
5082                 cid = TX_TSS_CID + ring_num - 1;
5083
5084         bp->tx_wake_thresh = bp->tx_ring_size / 2;
5085
5086         txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
5087
5088         txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5089         txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5090
5091         txr->tx_prod = 0;
5092         txr->tx_prod_bseq = 0;
5093
5094         txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5095         txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5096
5097         bnx2_init_tx_context(bp, cid, txr);
5098 }
5099
5100 static void
5101 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
5102                      int num_rings)
5103 {
5104         int i;
5105         struct rx_bd *rxbd;
5106
5107         for (i = 0; i < num_rings; i++) {
5108                 int j;
5109
5110                 rxbd = &rx_ring[i][0];
5111                 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
5112                         rxbd->rx_bd_len = buf_size;
5113                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5114                 }
5115                 if (i == (num_rings - 1))
5116                         j = 0;
5117                 else
5118                         j = i + 1;
5119                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5120                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5121         }
5122 }
5123
/* Set up RX ring @ring_num: initialize the BD pages and chip context,
 * optionally set up the jumbo page ring, pre-fill both rings with
 * buffers, and write the initial producer indices to the chip.
 * The exact register/context write order is preserved as required by
 * the hardware bring-up sequence.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
	int i;
	u16 prod, ring_prod;
	u32 cid, rx_cid_addr, val;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* Ring 0 uses the base RX CID; extra (RSS) rings follow it. */
	if (ring_num == 0)
		cid = RX_CID;
	else
		cid = RX_RSS_CID + ring_num - 1;

	rx_cid_addr = GET_CID_ADDR(cid);

	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_init_rx_context(bp, cid);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
		REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
	}

	/* Page buffer size 0 disables the page ring unless re-enabled
	 * below.
	 */
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		/* Jumbo frames: a second ring of full-page buffers. */
		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
				     rxr->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	/* Bus address of the first normal RX descriptor page. */
	val = (u64) rxr->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the page ring; stop early (with a warning) if page
	 * allocation fails — the ring then runs with fewer buffers.
	 */
	ring_prod = prod = rxr->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0) {
			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
				    ring_num, i, bp->rx_pg_ring_size);
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_PG_RING_IDX(prod);
	}
	rxr->rx_pg_prod = prod;

	/* Pre-fill the normal ring with skbs, same early-stop policy. */
	ring_prod = prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0) {
			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
				    ring_num, i, bp->rx_ring_size);
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	rxr->rx_prod = prod;

	/* Mailbox (doorbell) addresses used on the RX refill path. */
	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

	/* Tell the chip how many buffers are available. */
	REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
	REG_WR16(bp, rxr->rx_bidx_addr, prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
5209
/* (Re)initialize every TX and RX ring and, for multi-queue setups,
 * program the chip's TSS steering and RSS indirection table.
 */
static void
bnx2_init_all_rings(struct bnx2 *bp)
{
	int i;
	u32 val;

	bnx2_clear_ring_states(bp);

	/* Disable TX steering while the TX rings are set up. */
	REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
	for (i = 0; i < bp->num_tx_rings; i++)
		bnx2_init_tx_ring(bp, i);

	if (bp->num_tx_rings > 1)
		REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
		       (TX_TSS_CID << 7));

	/* Disable RSS while the RX rings are set up. */
	REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < bp->num_rx_rings; i++)
		bnx2_init_rx_ring(bp, i);

	if (bp->num_rx_rings > 1) {
		u32 tbl_32;
		u8 *tbl = (u8 *) &tbl_32;	/* byte view to pack 4 entries per word */

		bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
				BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);

		/* Fill the indirection table: entry i cycles through
		 * (num_rx_rings - 1) values; every 4th byte flushes the
		 * accumulated word to the chip in big-endian order.
		 */
		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
			tbl[i % 4] = i % (bp->num_rx_rings - 1);
			if ((i % 4) == 3)
				bnx2_reg_wr_ind(bp,
						BNX2_RXP_SCRATCH_RSS_TBL + i,
						cpu_to_be32(tbl_32));
		}

		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

		REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);

	}
}
5254
5255 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5256 {
5257         u32 max, num_rings = 1;
5258
5259         while (ring_size > MAX_RX_DESC_CNT) {
5260                 ring_size -= MAX_RX_DESC_CNT;
5261                 num_rings++;
5262         }
5263         /* round to next power of 2 */
5264         max = max_size;
5265         while ((max & num_rings) == 0)
5266                 max >>= 1;
5267
5268         if (num_rings != max)
5269                 max <<= 1;
5270
5271         return max;
5272 }
5273
/* Compute all RX buffer/ring geometry for the current MTU and the
 * requested ring size @size: copy-break threshold, buffer sizes, ring
 * counts, and — when one skb cannot hold an MTU-sized frame — the
 * jumbo page-ring sizes.
 */
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
	u32 rx_size, rx_space, jumbo_size;

	/* 8 for CRC and VLAN */
	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;

	/* Total skb footprint including alignment and shared info. */
	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
		sizeof(struct skb_shared_info);

	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
	bp->rx_pg_ring_size = 0;
	bp->rx_max_pg_ring = 0;
	bp->rx_max_pg_ring_idx = 0;
	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
		/* NOTE(review): the "- 40" presumably accounts for the
		 * header bytes kept in the linear buffer — confirm.
		 */
		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;

		jumbo_size = size * pages;
		if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
			jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;

		bp->rx_pg_ring_size = jumbo_size;
		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
							MAX_RX_PG_RINGS);
		bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
		/* In jumbo mode the linear buffer only needs to hold
		 * the copy-break portion; the rest lands in pages.
		 */
		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
		bp->rx_copy_thresh = 0;
	}

	bp->rx_buf_use_size = rx_size;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
	bp->rx_ring_size = size;
	bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
	bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
}
5312
/* Unmap and free every skb still queued on the TX rings.  Callers
 * reset the chip before invoking this (see bnx2_reset_nic), so no TX
 * completions race with the teardown.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
		int j;

		if (txr->tx_buf_ring == NULL)
			continue;

		/* j advances inside the body: by 1 for an empty slot,
		 * or by 1 + nr_frags for a queued packet.
		 */
		for (j = 0; j < TX_DESC_CNT; ) {
			struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
			struct sk_buff *skb = tx_buf->skb;
			int k, last;

			if (skb == NULL) {
				j++;
				continue;
			}

			/* Head of the packet: linear data mapping. */
			pci_unmap_single(bp->pdev,
					 dma_unmap_addr(tx_buf, mapping),
					 skb_headlen(skb),
					 PCI_DMA_TODEVICE);

			tx_buf->skb = NULL;

			/* Then one page mapping per fragment, with the
			 * index wrapped through TX_RING_IDX.
			 */
			last = tx_buf->nr_frags;
			j++;
			for (k = 0; k < last; k++, j++) {
				tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
				pci_unmap_page(bp->pdev,
					dma_unmap_addr(tx_buf, mapping),
					skb_shinfo(skb)->frags[k].size,
					PCI_DMA_TODEVICE);
			}
			dev_kfree_skb(skb);
		}
	}
}
5356
5357 static void
5358 bnx2_free_rx_skbs(struct bnx2 *bp)
5359 {
5360         int i;
5361
5362         for (i = 0; i < bp->num_rx_rings; i++) {
5363                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5364                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5365                 int j;
5366
5367                 if (rxr->rx_buf_ring == NULL)
5368                         return;
5369
5370                 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5371                         struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5372                         struct sk_buff *skb = rx_buf->skb;
5373
5374                         if (skb == NULL)
5375                                 continue;
5376
5377                         pci_unmap_single(bp->pdev,
5378                                          dma_unmap_addr(rx_buf, mapping),
5379                                          bp->rx_buf_use_size,
5380                                          PCI_DMA_FROMDEVICE);
5381
5382                         rx_buf->skb = NULL;
5383
5384                         dev_kfree_skb(skb);
5385                 }
5386                 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5387                         bnx2_free_rx_page(bp, rxr, j);
5388         }
5389 }
5390
/* Free every skb still held by the TX and RX rings. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
5397
5398 static int
5399 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5400 {
5401         int rc;
5402
5403         rc = bnx2_reset_chip(bp, reset_code);
5404         bnx2_free_skbs(bp);
5405         if (rc)
5406                 return rc;
5407
5408         if ((rc = bnx2_init_chip(bp)) != 0)
5409                 return rc;
5410
5411         bnx2_init_all_rings(bp);
5412         return 0;
5413 }
5414
5415 static int
5416 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5417 {
5418         int rc;
5419
5420         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5421                 return rc;
5422
5423         spin_lock_bh(&bp->phy_lock);
5424         bnx2_init_phy(bp, reset_phy);
5425         bnx2_set_link(bp);
5426         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5427                 bnx2_remote_phy_event(bp);
5428         spin_unlock_bh(&bp->phy_lock);
5429         return 0;
5430 }
5431
5432 static int
5433 bnx2_shutdown_chip(struct bnx2 *bp)
5434 {
5435         u32 reset_code;
5436
5437         if (bp->flags & BNX2_FLAG_NO_WOL)
5438                 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5439         else if (bp->wol)
5440                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5441         else
5442                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5443
5444         return bnx2_reset_chip(bp, reset_code);
5445 }
5446
/* Self-test: for each register in the table, verify that read/write
 * bits (rw_mask) can be written and that read-only bits (ro_mask)
 * survive writes of all-zeros and all-ones.  The original register
 * value is restored in every case.  Returns 0 on success or -ENODEV
 * on the first failing register.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	static const struct {
		u16   offset;
		u16   flags;	/* BNX2_FL_NOT_5709: skip entry on 5709 */
#define BNX2_FL_NOT_5709        1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		/* offset 0xffff terminates the table */
		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Write zeros: rw bits must read back 0, ro bits must
		 * keep their saved value.
		 */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write ones: rw bits must read back 1, ro bits must
		 * still keep their saved value.
		 */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the register before reporting the failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
5617
5618 static int
5619 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5620 {
5621         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5622                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5623         int i;
5624
5625         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5626                 u32 offset;
5627
5628                 for (offset = 0; offset < size; offset += 4) {
5629
5630                         bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5631
5632                         if (bnx2_reg_rd_ind(bp, start + offset) !=
5633                                 test_pattern[i]) {
5634                                 return -ENODEV;
5635                         }
5636                 }
5637         }
5638         return 0;
5639 }
5640
5641 static int
5642 bnx2_test_memory(struct bnx2 *bp)
5643 {
5644         int ret = 0;
5645         int i;
5646         static struct mem_entry {
5647                 u32   offset;
5648                 u32   len;
5649         } mem_tbl_5706[] = {
5650                 { 0x60000,  0x4000 },
5651                 { 0xa0000,  0x3000 },
5652                 { 0xe0000,  0x4000 },
5653                 { 0x120000, 0x4000 },
5654                 { 0x1a0000, 0x4000 },
5655                 { 0x160000, 0x4000 },
5656                 { 0xffffffff, 0    },
5657         },
5658         mem_tbl_5709[] = {
5659                 { 0x60000,  0x4000 },
5660                 { 0xa0000,  0x3000 },
5661                 { 0xe0000,  0x4000 },
5662                 { 0x120000, 0x4000 },
5663                 { 0x1a0000, 0x4000 },
5664                 { 0xffffffff, 0    },
5665         };
5666         struct mem_entry *mem_tbl;
5667
5668         if (CHIP_NUM(bp) == CHIP_NUM_5709)
5669                 mem_tbl = mem_tbl_5709;
5670         else
5671                 mem_tbl = mem_tbl_5706;
5672
5673         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5674                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5675                         mem_tbl[i].len)) != 0) {
5676                         return ret;
5677                 }
5678         }
5679
5680         return ret;
5681 }
5682
5683 #define BNX2_MAC_LOOPBACK       0
5684 #define BNX2_PHY_LOOPBACK       1
5685
/* Self-test: send one known packet through the requested loopback
 * (MAC- or PHY-level) and verify it comes back intact on the RX ring.
 * Returns 0 on success, -EINVAL for an unknown mode, -ENOMEM/-EIO on
 * allocation/mapping failure, and -ENODEV when the packet is lost or
 * corrupted.  For PHY loopback with a remote PHY, the test is skipped
 * and reported as success.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* NOTE(review): tx_napi currently just aliases bnapi (vector 0),
	 * making the reassignments of txr/rxr below redundant with the
	 * initializers above.
	 */
	tx_napi = bnapi;

	txr = &tx_napi->tx_ring;
	rxr = &bnapi->rx_ring;
	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build a test frame: our MAC as destination, zeroed source and
	 * ethertype, then a predictable byte pattern as payload.
	 */
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(bp->pdev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	/* Force a status-block update so we can snapshot the RX
	 * consumer index before sending.
	 */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

	num_pkts = 0;

	/* Fill one TX BD describing the whole frame and ring the
	 * producer doorbells.
	 */
	txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
	txr->tx_prod_bseq += pkt_size;

	REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	/* Give the frame time to loop back, then force another
	 * status-block update to pick up the new indices.
	 */
	udelay(100);

	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* The frame must have been consumed on TX ... */
	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
		goto loopback_test_done;

	/* ... and exactly num_pkts frames must have arrived on RX. */
	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	rx_hdr = rx_buf->desc;
	skb_reserve(rx_skb, BNX2_RX_OFFSET);

	pci_dma_sync_single_for_cpu(bp->pdev,
		dma_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	/* Reject frames the chip flagged as damaged. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Length check: received length minus 4 (CRC) must match. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Payload must match the pattern written above. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
5817
/* Bit flags returned by bnx2_test_loopback() identifying which
 * loopback mode(s) failed.
 */
#define BNX2_MAC_LOOPBACK_FAILED        1
#define BNX2_PHY_LOOPBACK_FAILED        2
#define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
                                         BNX2_PHY_LOOPBACK_FAILED)
5822
5823 static int
5824 bnx2_test_loopback(struct bnx2 *bp)
5825 {
5826         int rc = 0;
5827
5828         if (!netif_running(bp->dev))
5829                 return BNX2_LOOPBACK_FAILED;
5830
5831         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5832         spin_lock_bh(&bp->phy_lock);
5833         bnx2_init_phy(bp, 1);
5834         spin_unlock_bh(&bp->phy_lock);
5835         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5836                 rc |= BNX2_MAC_LOOPBACK_FAILED;
5837         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5838                 rc |= BNX2_PHY_LOOPBACK_FAILED;
5839         return rc;
5840 }
5841
/* Size of the NVRAM region verified by bnx2_test_nvram(), and the
 * well-known CRC32 residual left when a block with a valid trailing
 * CRC is run back through the CRC.
 */
#define NVRAM_SIZE 0x200
#define CRC32_RESIDUAL 0xdebb20e3
5844
5845 static int
5846 bnx2_test_nvram(struct bnx2 *bp)
5847 {
5848         __be32 buf[NVRAM_SIZE / 4];
5849         u8 *data = (u8 *) buf;
5850         int rc = 0;
5851         u32 magic, csum;
5852
5853         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5854                 goto test_nvram_done;
5855
5856         magic = be32_to_cpu(buf[0]);
5857         if (magic != 0x669955aa) {
5858                 rc = -ENODEV;
5859                 goto test_nvram_done;
5860         }
5861
5862         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5863                 goto test_nvram_done;
5864
5865         csum = ether_crc_le(0x100, data);
5866         if (csum != CRC32_RESIDUAL) {
5867                 rc = -ENODEV;
5868                 goto test_nvram_done;
5869         }
5870
5871         csum = ether_crc_le(0x100, data + 0x100);
5872         if (csum != CRC32_RESIDUAL) {
5873                 rc = -ENODEV;
5874         }
5875
5876 test_nvram_done:
5877         return rc;
5878 }
5879
5880 static int
5881 bnx2_test_link(struct bnx2 *bp)
5882 {
5883         u32 bmsr;
5884
5885         if (!netif_running(bp->dev))
5886                 return -ENODEV;
5887
5888         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5889                 if (bp->link_up)
5890                         return 0;
5891                 return -ENODEV;
5892         }
5893         spin_lock_bh(&bp->phy_lock);
5894         bnx2_enable_bmsr1(bp);
5895         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5896         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5897         bnx2_disable_bmsr1(bp);
5898         spin_unlock_bh(&bp->phy_lock);
5899
5900         if (bmsr & BMSR_LSTATUS) {
5901                 return 0;
5902         }
5903         return -ENODEV;
5904 }
5905
5906 static int
5907 bnx2_test_intr(struct bnx2 *bp)
5908 {
5909         int i;
5910         u16 status_idx;
5911
5912         if (!netif_running(bp->dev))
5913                 return -ENODEV;
5914
5915         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5916
5917         /* This register is not touched during run-time. */
5918         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5919         REG_RD(bp, BNX2_HC_COMMAND);
5920
5921         for (i = 0; i < 10; i++) {
5922                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5923                         status_idx) {
5924
5925                         break;
5926                 }
5927
5928                 msleep_interruptible(10);
5929         }
5930         if (i < 10)
5931                 return 0;
5932
5933         return -ENODEV;
5934 }
5935
/* Determining link for parallel detection. */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
	u32 mode_ctl, an_dbg, exp;

	/* Parallel detection disabled for this board. */
	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
		return 0;

	/* Select and read the MODE_CTL shadow register. */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

	/* No signal detected on the wire -> no link. */
	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
		return 0;

	/* Read twice; NOTE(review): the first read appears to return
	 * a latched value which the second read clears -- confirm
	 * against the PHY documentation.
	 */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
		return 0;

	/* Same double-read pattern for the expansion register. */
	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
		return 0;

	return 1;
}
5967
/* Periodic link maintenance for the 5706 SerDes PHY, run from the
 * driver timer.  Implements parallel detection: when autoneg finds
 * no partner but a signal is present, force 1G full duplex; when a
 * forced link partner later starts autonegotiating, re-enable
 * autoneg.  Also forces the link down if sync is lost.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	int check_link = 1;

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending) {
		/* Still waiting out a previous autoneg restart; skip
		 * the link check this tick.
		 */
		bp->serdes_an_pending--;
		check_link = 0;
	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = BNX2_TIMER_INTERVAL;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			if (bnx2_5706_serdes_has_link(bp)) {
				/* Signal present but no autoneg partner:
				 * force 1G full duplex (parallel detect).
				 */
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
		u32 phy2;

		/* NOTE(review): 0x17/0x15 look like vendor expansion
		 * registers; bit 5 of the read seems to indicate the
		 * partner now autonegotiates -- confirm against the
		 * PHY datasheet.
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Partner autonegotiates again: leave parallel
			 * detect mode and re-enable autoneg.
			 */
			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	if (check_link) {
		u32 val;

		/* Double read of AN_DBG -- see
		 * bnx2_5706_serdes_has_link() for the same pattern.
		 */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
			/* Link reported up but sync lost: force it down
			 * once, then fall through to normal handling.
			 */
			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
				bnx2_5706s_force_link_dn(bp, 1);
				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
			} else
				bnx2_set_link(bp);
		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
			bnx2_set_link(bp);
	}
	spin_unlock(&bp->phy_lock);
}
6029
/* Periodic link maintenance for the 5708 SerDes PHY, run from the
 * driver timer.  While the link is down, alternate between forced
 * 2.5G mode and autoneg so that either kind of link partner can be
 * found.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	/* Remote PHY: firmware manages the link. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* Give the last autoneg restart more time. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg found nothing; try forced 2.5G. */
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
		} else {
			/* Forced mode found nothing; go back to autoneg
			 * and skip the next two timer ticks.
			 */
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = BNX2_TIMER_INTERVAL;
		}

	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	spin_unlock(&bp->phy_lock);
}
6062
/* Driver heartbeat timer: firmware keepalive, firmware drop-count
 * refresh, stats workaround and SerDes link maintenance.  Re-arms
 * itself every bp->current_interval jiffies while the device runs.
 */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;

	if (!netif_running(bp->dev))
		return;

	/* Device is being reset; just re-arm and try again later. */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	/* Only plain MSI (not one-shot) needs the missed-MSI check. */
	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
	     BNX2_FLAG_USING_MSI)
		bnx2_chk_missed_msi(bp);

	bnx2_send_heart_beat(bp);

	/* Pull the firmware rx-drop counter into the stats block. */
	bp->stats_blk->stat_FwRxDrop =
		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);

	/* workaround occasional corrupted counters */
	if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
		REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
					    BNX2_HC_COMMAND_STATS_NOW);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
6098
6099 static int
6100 bnx2_request_irq(struct bnx2 *bp)
6101 {
6102         unsigned long flags;
6103         struct bnx2_irq *irq;
6104         int rc = 0, i;
6105
6106         if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6107                 flags = 0;
6108         else
6109                 flags = IRQF_SHARED;
6110
6111         for (i = 0; i < bp->irq_nvecs; i++) {
6112                 irq = &bp->irq_tbl[i];
6113                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6114                                  &bp->bnx2_napi[i]);
6115                 if (rc)
6116                         break;
6117                 irq->requested = 1;
6118         }
6119         return rc;
6120 }
6121
6122 static void
6123 bnx2_free_irq(struct bnx2 *bp)
6124 {
6125         struct bnx2_irq *irq;
6126         int i;
6127
6128         for (i = 0; i < bp->irq_nvecs; i++) {
6129                 irq = &bp->irq_tbl[i];
6130                 if (irq->requested)
6131                         free_irq(irq->vector, &bp->bnx2_napi[i]);
6132                 irq->requested = 0;
6133         }
6134         if (bp->flags & BNX2_FLAG_USING_MSI)
6135                 pci_disable_msi(bp->pdev);
6136         else if (bp->flags & BNX2_FLAG_USING_MSIX)
6137                 pci_disable_msix(bp->pdev);
6138
6139         bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6140 }
6141
/* Try to put the device into MSI-X mode with msix_vecs vectors.
 * On success sets BNX2_FLAG_USING_MSIX/ONE_SHOT_MSI and fills
 * irq_tbl; on any failure returns silently so the caller can fall
 * back to MSI or INTx.
 */
static void
bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
{
	int i, rc;
	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
	struct net_device *dev = bp->dev;
	const int len = sizeof(bp->irq_tbl[0].name);

	/* Program the chip's MSI-X table/PBA window registers. */
	bnx2_setup_msix_tbl(bp);
	REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
	REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
	REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);

	/*  Need to flush the previous three writes to ensure MSI-X
	 *  is setup properly */
	REG_RD(bp, BNX2_PCI_MSIX_CONTROL);

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		msix_ent[i].entry = i;
		msix_ent[i].vector = 0;
	}

	/* NOTE(review): a positive return (fewer vectors available
	 * than requested) is treated the same as failure here; there
	 * is no retry with a smaller count -- confirm intended.
	 */
	rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
	if (rc != 0)
		return;

	bp->irq_nvecs = msix_vecs;
	bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		bp->irq_tbl[i].vector = msix_ent[i].vector;
		snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
		bp->irq_tbl[i].handler = bnx2_msi_1shot;
	}
}
6176
/* Pick the interrupt mode (MSI-X, then MSI, then legacy INTx) and
 * program irq_tbl accordingly.  Also derives the tx/rx ring counts
 * from the number of vectors actually obtained.
 */
static void
bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
{
	int cpus = num_online_cpus();
	/* One vector per CPU plus one, capped by the rx ring limit. */
	int msix_vecs = min(cpus + 1, RX_MAX_RINGS);

	/* Default: a single legacy INTx vector. */
	bp->irq_tbl[0].handler = bnx2_interrupt;
	strcpy(bp->irq_tbl[0].name, bp->dev->name);
	bp->irq_nvecs = 1;
	bp->irq_tbl[0].vector = bp->pdev->irq;

	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
		bnx2_enable_msix(bp, msix_vecs);

	/* Fall back to MSI only if MSI-X did not get enabled. */
	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
	    !(bp->flags & BNX2_FLAG_USING_MSIX)) {
		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= BNX2_FLAG_USING_MSI;
			if (CHIP_NUM(bp) == CHIP_NUM_5709) {
				/* 5709 supports one-shot MSI. */
				bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
				bp->irq_tbl[0].handler = bnx2_msi_1shot;
			} else
				bp->irq_tbl[0].handler = bnx2_msi;

			bp->irq_tbl[0].vector = bp->pdev->irq;
		}
	}

	/* Tx ring count must be a power of two. */
	bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
	bp->dev->real_num_tx_queues = bp->num_tx_rings;

	bp->num_rx_rings = bp->irq_nvecs;
}
6210
/* Called with rtnl_lock */
/* ndo_open: power up the chip, pick an interrupt mode, allocate
 * rings, request IRQs and bring the NIC up.  If MSI was chosen but
 * proves non-functional, tears the setup down and retries in INTx
 * mode.  Returns 0 on success or a negative errno.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	netif_carrier_off(dev);

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	bnx2_setup_int_mode(bp, disable_msi);
	bnx2_init_napi(bp);
	bnx2_napi_enable(bp);
	rc = bnx2_alloc_mem(bp);
	if (rc)
		goto open_err;

	rc = bnx2_request_irq(bp);
	if (rc)
		goto open_err;

	rc = bnx2_init_nic(bp, 1);
	if (rc)
		goto open_err;

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	/* Start the reset-persistent stats accumulator from zero. */
	memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));

	bnx2_enable_int(bp);

	if (bp->flags & BNX2_FLAG_USING_MSI) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");

			bnx2_disable_int(bp);
			bnx2_free_irq(bp);

			/* dis_msi=1: reconfigure for legacy INTx. */
			bnx2_setup_int_mode(bp, 1);

			rc = bnx2_init_nic(bp, 0);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				del_timer_sync(&bp->timer);
				goto open_err;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & BNX2_FLAG_USING_MSI)
		netdev_info(dev, "using MSI\n");
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		netdev_info(dev, "using MSIX\n");

	netif_tx_start_all_queues(dev);

	return 0;

open_err:
	/* Unwind everything set up above, in reverse order. */
	bnx2_napi_disable(bp);
	bnx2_free_skbs(bp);
	bnx2_free_irq(bp);
	bnx2_free_mem(bp);
	bnx2_del_napi(bp);
	return rc;
}
6287
6288 static void
6289 bnx2_reset_task(struct work_struct *work)
6290 {
6291         struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
6292
6293         rtnl_lock();
6294         if (!netif_running(bp->dev)) {
6295                 rtnl_unlock();
6296                 return;
6297         }
6298
6299         bnx2_netif_stop(bp, true);
6300
6301         bnx2_init_nic(bp, 1);
6302
6303         atomic_set(&bp->intr_sem, 1);
6304         bnx2_netif_start(bp, true);
6305         rtnl_unlock();
6306 }
6307
/* Dump key hardware and firmware state to the kernel log, used for
 * tx-timeout and similar debugging.
 */
static void
bnx2_dump_state(struct bnx2 *bp)
{
	struct net_device *dev = bp->dev;
	u32 mcp_p0, mcp_p1;

	netdev_err(dev, "DEBUG: intr_sem[%x]\n", atomic_read(&bp->intr_sem));
	netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
		   REG_RD(bp, BNX2_EMAC_TX_STATUS),
		   REG_RD(bp, BNX2_EMAC_RX_STATUS));
	netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
		   REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
	/* The MCP state registers live at different offsets on 5709. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		mcp_p0 = BNX2_MCP_STATE_P0;
		mcp_p1 = BNX2_MCP_STATE_P1;
	} else {
		mcp_p0 = BNX2_MCP_STATE_P0_5708;
		mcp_p1 = BNX2_MCP_STATE_P1_5708;
	}
	netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
		   bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
	netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
		   REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
	/* The PBA only exists when MSI-X is in use. */
	if (bp->flags & BNX2_FLAG_USING_MSIX)
		netdev_err(dev, "DEBUG: PBA[%08x]\n",
			   REG_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
}
6335
6336 static void
6337 bnx2_tx_timeout(struct net_device *dev)
6338 {
6339         struct bnx2 *bp = netdev_priv(dev);
6340
6341         bnx2_dump_state(bp);
6342
6343         /* This allows the netif to be shutdown gracefully before resetting */
6344         schedule_work(&bp->reset_task);
6345 }
6346
#ifdef BCM_VLAN
/* Called with rtnl_lock */
/* Install (or clear, when vlgrp is NULL) the VLAN group and
 * reprogram the rx mode to match.  The interface is quiesced
 * around the swap when it is running.
 */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (netif_running(dev))
		bnx2_netif_stop(bp, false);

	bp->vlgrp = vlgrp;

	/* Interface was not running, so nothing was stopped above
	 * and there is nothing to reprogram or restart.
	 */
	if (!netif_running(dev))
		return;

	bnx2_set_rx_mode(dev);
	/* Tell firmware whether it may keep VLAN tags on rx frames. */
	if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);

	bnx2_netif_start(bp, false);
}
#endif
6369
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
/* ndo_start_xmit: map the skb (head plus page fragments) for DMA,
 * fill one tx BD per segment with checksum/VLAN/LSO flags, then
 * ring the doorbell.  On a fragment mapping failure, everything
 * mapped so far is unwound and the packet is dropped.
 */
static netdev_tx_t
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_tx_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;
	struct bnx2_napi *bnapi;
	struct bnx2_tx_ring_info *txr;
	struct netdev_queue *txq;

	/*  Determine which tx ring we will be placed on */
	i = skb_get_queue_mapping(skb);
	bnapi = &bp->bnx2_napi[i];
	txr = &bnapi->tx_ring;
	txq = netdev_get_tx_queue(dev, i);

	/* Should not happen: the queue is stopped near the end of
	 * this function before the ring can fill up.
	 */
	if (unlikely(bnx2_tx_avail(bp, txr) <
	    (skb_shinfo(skb)->nr_frags + 1))) {
		netif_tx_stop_queue(txq);
		netdev_err(dev, "BUG! Tx ring full when queue awake!\n");

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = txr->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

#ifdef BCM_VLAN
	if (bp->vlgrp && vlan_tx_tag_present(skb)) {
		/* VLAN tag rides in the upper 16 bits of the flags. */
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
#endif
	if ((mss = skb_shinfo(skb)->gso_size)) {
		/* LSO: encode MSS and header-length hints into the BD. */
		u32 tcp_opt_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			/* IPv6: the TCP header offset beyond the fixed
			 * ipv6 header is split across three BD fields.
			 */
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			/* IPv4: encode combined IP + TCP option length
			 * (in 32-bit words) above the base headers.
			 */
			iph = ip_hdr(skb);
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(bp->pdev, mapping)) {
		/* Nothing posted yet; just drop the packet. */
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	tx_buf = &txr->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	dma_unmap_addr_set(tx_buf, mapping, mapping);

	txbd = &txr->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;
	tx_buf->nr_frags = last_frag;
	tx_buf->is_gso = skb_is_gso(skb);

	/* One BD per page fragment. */
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &txr->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
			len, PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(bp->pdev, mapping))
			goto dma_error;
		dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
				   mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	/* The last BD written (head or final fragment) gets END. */
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	txr->tx_prod_bseq += skb->len;

	/* Ring the doorbell: new producer index and byte sequence. */
	REG_WR16(bp, txr->tx_bidx_addr, prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	mmiowb();

	txr->tx_prod = prod;

	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
		netif_tx_stop_queue(txq);
		/* Re-check after stopping: bnx2_tx_int() may have
		 * freed descriptors in the meantime and missed the
		 * just-stopped queue.
		 */
		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_tx_wake_queue(txq);
	}

	return NETDEV_TX_OK;
dma_error:
	/* save value of frag that failed */
	last_frag = i;

	/* start back at beginning and unmap skb */
	prod = txr->tx_prod;
	ring_prod = TX_RING_IDX(prod);
	tx_buf = &txr->tx_buf_ring[ring_prod];
	tx_buf->skb = NULL;
	pci_unmap_single(bp->pdev, dma_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), PCI_DMA_TODEVICE);

	/* unmap remaining mapped pages */
	for (i = 0; i < last_frag; i++) {
		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		tx_buf = &txr->tx_buf_ring[ring_prod];
		pci_unmap_page(bp->pdev, dma_unmap_addr(tx_buf, mapping),
			       skb_shinfo(skb)->frags[i].size,
			       PCI_DMA_TODEVICE);
	}

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
6536
/* Called with rtnl_lock */
/* ndo_stop: quiesce the device and release all resources, in the
 * reverse order of bnx2_open().
 */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Make sure no reset task runs concurrently with teardown. */
	cancel_work_sync(&bp->reset_task);

	bnx2_disable_int_sync(bp);
	bnx2_napi_disable(bp);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bnx2_del_napi(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	/* Leave the chip in low power until the next open. */
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
6558
6559 static void
6560 bnx2_save_stats(struct bnx2 *bp)
6561 {
6562         u32 *hw_stats = (u32 *) bp->stats_blk;
6563         u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6564         int i;
6565
6566         /* The 1st 10 counters are 64-bit counters */
6567         for (i = 0; i < 20; i += 2) {
6568                 u32 hi;
6569                 u64 lo;
6570
6571                 hi = temp_stats[i] + hw_stats[i];
6572                 lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6573                 if (lo > 0xffffffff)
6574                         hi++;
6575                 temp_stats[i] = hi;
6576                 temp_stats[i + 1] = lo & 0xffffffff;
6577         }
6578
6579         for ( ; i < sizeof(struct statistics_block) / 4; i++)
6580                 temp_stats[i] += hw_stats[i];
6581 }
6582
/* Helpers to fold a hardware hi/lo 64-bit counter pair (live block
 * plus the saved temp_stats copy) into an unsigned long.  On
 * 32-bit hosts only the low 32 bits are reported.
 */
#define GET_64BIT_NET_STATS64(ctr)                              \
        (unsigned long) ((unsigned long) (ctr##_hi) << 32) +    \
        (unsigned long) (ctr##_lo)

#define GET_64BIT_NET_STATS32(ctr)                              \
        (ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_64BIT_NET_STATS(ctr)                                \
        GET_64BIT_NET_STATS64(bp->stats_blk->ctr) +             \
        GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
#else
#define GET_64BIT_NET_STATS(ctr)                                \
        GET_64BIT_NET_STATS32(bp->stats_blk->ctr) +             \
        GET_64BIT_NET_STATS32(bp->temp_stats_blk->ctr)
#endif

/* 32-bit counter: live block plus the saved copy. */
#define GET_32BIT_NET_STATS(ctr)                                \
        (unsigned long) (bp->stats_blk->ctr +                   \
                         bp->temp_stats_blk->ctr)
6603
/* ndo_get_stats: combine the live hardware statistics block with
 * the counters saved across resets (temp_stats_blk) into the
 * generic net_device_stats structure.
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct net_device_stats *net_stats = &dev->stats;

	/* Stats block not allocated yet; report what is cached. */
	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	net_stats->rx_packets =
		GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_64BIT_NET_STATS(stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_64BIT_NET_STATS(stat_IfHCOutOctets);

	net_stats->multicast =
		GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		GET_32BIT_NET_STATS(stat_EtherStatsCollisions);

	net_stats->rx_length_errors =
		GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
		GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);

	net_stats->rx_frame_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);

	net_stats->rx_crc_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
		GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);

	/* NOTE(review): carrier sense errors are deliberately zeroed
	 * on 5706 and 5708 A0 -- presumably a hardware erratum;
	 * confirm against the chip errata.
	 */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
	}

	net_stats->tx_errors =
		GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	net_stats->rx_missed_errors =
		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
		GET_32BIT_NET_STATS(stat_FwRxDrop);

	return net_stats;
}
6677
6678 /* All ethtool functions called with rtnl_lock */
6679
6680 static int
6681 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6682 {
6683         struct bnx2 *bp = netdev_priv(dev);
6684         int support_serdes = 0, support_copper = 0;
6685
6686         cmd->supported = SUPPORTED_Autoneg;
6687         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6688                 support_serdes = 1;
6689                 support_copper = 1;
6690         } else if (bp->phy_port == PORT_FIBRE)
6691                 support_serdes = 1;
6692         else
6693                 support_copper = 1;
6694
6695         if (support_serdes) {
6696                 cmd->supported |= SUPPORTED_1000baseT_Full |
6697                         SUPPORTED_FIBRE;
6698                 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6699                         cmd->supported |= SUPPORTED_2500baseX_Full;
6700
6701         }
6702         if (support_copper) {
6703                 cmd->supported |= SUPPORTED_10baseT_Half |
6704                         SUPPORTED_10baseT_Full |
6705                         SUPPORTED_100baseT_Half |
6706                         SUPPORTED_100baseT_Full |
6707                         SUPPORTED_1000baseT_Full |
6708                         SUPPORTED_TP;
6709
6710         }
6711
6712         spin_lock_bh(&bp->phy_lock);
6713         cmd->port = bp->phy_port;
6714         cmd->advertising = bp->advertising;
6715
6716         if (bp->autoneg & AUTONEG_SPEED) {
6717                 cmd->autoneg = AUTONEG_ENABLE;
6718         }
6719         else {
6720                 cmd->autoneg = AUTONEG_DISABLE;
6721         }
6722
6723         if (netif_carrier_ok(dev)) {
6724                 cmd->speed = bp->line_speed;
6725                 cmd->duplex = bp->duplex;
6726         }
6727         else {
6728                 cmd->speed = -1;
6729                 cmd->duplex = -1;
6730         }
6731         spin_unlock_bh(&bp->phy_lock);
6732
6733         cmd->transceiver = XCVR_INTERNAL;
6734         cmd->phy_address = bp->phy_addr;
6735
6736         return 0;
6737 }
6738
/* ethtool set_settings callback.  Validates and records the requested
 * port/autoneg/speed/duplex, then (if the device is up) applies them
 * via bnx2_setup_phy().  Returns 0 or -EINVAL on invalid combinations.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct bnx2 *bp = netdev_priv(dev);
        u8 autoneg = bp->autoneg;
        u8 req_duplex = bp->req_duplex;
        u16 req_line_speed = bp->req_line_speed;
        u32 advertising = bp->advertising;
        int err = -EINVAL;

        spin_lock_bh(&bp->phy_lock);

        /* Only TP and FIBRE ports exist on this hardware. */
        if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
                goto err_out_unlock;

        /* Switching ports is only possible with remote-PHY firmware. */
        if (cmd->port != bp->phy_port &&
            !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
                goto err_out_unlock;

        /* If device is down, we can store the settings only if the user
         * is setting the currently active port.
         */
        if (!netif_running(dev) && cmd->port != bp->phy_port)
                goto err_out_unlock;

        if (cmd->autoneg == AUTONEG_ENABLE) {
                autoneg |= AUTONEG_SPEED;

                /* Restrict the advertisement mask to the chosen medium;
                 * an empty mask means "advertise everything".
                 */
                advertising = cmd->advertising;
                if (cmd->port == PORT_TP) {
                        advertising &= ETHTOOL_ALL_COPPER_SPEED;
                        if (!advertising)
                                advertising = ETHTOOL_ALL_COPPER_SPEED;
                } else {
                        advertising &= ETHTOOL_ALL_FIBRE_SPEED;
                        if (!advertising)
                                advertising = ETHTOOL_ALL_FIBRE_SPEED;
                }
                advertising |= ADVERTISED_Autoneg;
        }
        else {
                /* Forced mode: fibre supports only 1G/2.5G full duplex
                 * (2.5G only if the PHY is 2.5G capable); copper must
                 * not be forced to 1G or 2.5G.
                 */
                if (cmd->port == PORT_FIBRE) {
                        if ((cmd->speed != SPEED_1000 &&
                             cmd->speed != SPEED_2500) ||
                            (cmd->duplex != DUPLEX_FULL))
                                goto err_out_unlock;

                        if (cmd->speed == SPEED_2500 &&
                            !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
                                goto err_out_unlock;
                }
                else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
                        goto err_out_unlock;

                autoneg &= ~AUTONEG_SPEED;
                req_line_speed = cmd->speed;
                req_duplex = cmd->duplex;
                advertising = 0;
        }

        /* Commit the validated settings to the device structure. */
        bp->autoneg = autoneg;
        bp->advertising = advertising;
        bp->req_line_speed = req_line_speed;
        bp->req_duplex = req_duplex;

        err = 0;
        /* If device is down, the new settings will be picked up when it is
         * brought up.
         */
        if (netif_running(dev))
                err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
        spin_unlock_bh(&bp->phy_lock);

        return err;
}
6816
6817 static void
6818 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6819 {
6820         struct bnx2 *bp = netdev_priv(dev);
6821
6822         strcpy(info->driver, DRV_MODULE_NAME);
6823         strcpy(info->version, DRV_MODULE_VERSION);
6824         strcpy(info->bus_info, pci_name(bp->pdev));
6825         strcpy(info->fw_version, bp->fw_version);
6826 }
6827
6828 #define BNX2_REGDUMP_LEN                (32 * 1024)
6829
/* ethtool get_regs_len callback: size of the register dump produced by
 * bnx2_get_regs().
 */
static int
bnx2_get_regs_len(struct net_device *dev)
{
        return BNX2_REGDUMP_LEN;
}
6835
6836 static void
6837 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6838 {
6839         u32 *p = _p, i, offset;
6840         u8 *orig_p = _p;
6841         struct bnx2 *bp = netdev_priv(dev);
6842         u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6843                                  0x0800, 0x0880, 0x0c00, 0x0c10,
6844                                  0x0c30, 0x0d08, 0x1000, 0x101c,
6845                                  0x1040, 0x1048, 0x1080, 0x10a4,
6846                                  0x1400, 0x1490, 0x1498, 0x14f0,
6847                                  0x1500, 0x155c, 0x1580, 0x15dc,
6848                                  0x1600, 0x1658, 0x1680, 0x16d8,
6849                                  0x1800, 0x1820, 0x1840, 0x1854,
6850                                  0x1880, 0x1894, 0x1900, 0x1984,
6851                                  0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6852                                  0x1c80, 0x1c94, 0x1d00, 0x1d84,
6853                                  0x2000, 0x2030, 0x23c0, 0x2400,
6854                                  0x2800, 0x2820, 0x2830, 0x2850,
6855                                  0x2b40, 0x2c10, 0x2fc0, 0x3058,
6856                                  0x3c00, 0x3c94, 0x4000, 0x4010,
6857                                  0x4080, 0x4090, 0x43c0, 0x4458,
6858                                  0x4c00, 0x4c18, 0x4c40, 0x4c54,
6859                                  0x4fc0, 0x5010, 0x53c0, 0x5444,
6860                                  0x5c00, 0x5c18, 0x5c80, 0x5c90,
6861                                  0x5fc0, 0x6000, 0x6400, 0x6428,
6862                                  0x6800, 0x6848, 0x684c, 0x6860,
6863                                  0x6888, 0x6910, 0x8000 };
6864
6865         regs->version = 0;
6866
6867         memset(p, 0, BNX2_REGDUMP_LEN);
6868
6869         if (!netif_running(bp->dev))
6870                 return;
6871
6872         i = 0;
6873         offset = reg_boundaries[0];
6874         p += offset;
6875         while (offset < BNX2_REGDUMP_LEN) {
6876                 *p++ = REG_RD(bp, offset);
6877                 offset += 4;
6878                 if (offset == reg_boundaries[i + 1]) {
6879                         offset = reg_boundaries[i + 2];
6880                         p = (u32 *) (orig_p + offset);
6881                         i += 2;
6882                 }
6883         }
6884 }
6885
6886 static void
6887 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6888 {
6889         struct bnx2 *bp = netdev_priv(dev);
6890
6891         if (bp->flags & BNX2_FLAG_NO_WOL) {
6892                 wol->supported = 0;
6893                 wol->wolopts = 0;
6894         }
6895         else {
6896                 wol->supported = WAKE_MAGIC;
6897                 if (bp->wol)
6898                         wol->wolopts = WAKE_MAGIC;
6899                 else
6900                         wol->wolopts = 0;
6901         }
6902         memset(&wol->sopass, 0, sizeof(wol->sopass));
6903 }
6904
6905 static int
6906 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6907 {
6908         struct bnx2 *bp = netdev_priv(dev);
6909
6910         if (wol->wolopts & ~WAKE_MAGIC)
6911                 return -EINVAL;
6912
6913         if (wol->wolopts & WAKE_MAGIC) {
6914                 if (bp->flags & BNX2_FLAG_NO_WOL)
6915                         return -EINVAL;
6916
6917                 bp->wol = 1;
6918         }
6919         else {
6920                 bp->wol = 0;
6921         }
6922         return 0;
6923 }
6924
/* ethtool nway_reset callback: restart link autonegotiation.  Only
 * valid while the device is up and autoneg is enabled.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        u32 bmcr;

        if (!netif_running(dev))
                return -EAGAIN;

        /* Nothing to renegotiate when speed is forced. */
        if (!(bp->autoneg & AUTONEG_SPEED)) {
                return -EINVAL;
        }

        spin_lock_bh(&bp->phy_lock);

        /* Remote PHY: let the management firmware drive the restart. */
        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
                int rc;

                rc = bnx2_setup_remote_phy(bp, bp->phy_port);
                spin_unlock_bh(&bp->phy_lock);
                return rc;
        }

        /* Force a link down visible on the other side */
        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
                bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
                /* Drop the lock while sleeping so the 20ms loopback
                 * window doesn't stall other PHY users.
                 */
                spin_unlock_bh(&bp->phy_lock);

                msleep(20);

                spin_lock_bh(&bp->phy_lock);

                /* Arm the serdes autoneg timeout handled by bp->timer. */
                bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
                bp->serdes_an_pending = 1;
                mod_timer(&bp->timer, jiffies + bp->current_interval);
        }

        /* Clear loopback and kick off a fresh autonegotiation. */
        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
        bmcr &= ~BMCR_LOOPBACK;
        bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

        spin_unlock_bh(&bp->phy_lock);

        return 0;
}
6970
/* ethtool get_link callback: nonzero when the driver believes the link
 * is up (bp->link_up is maintained elsewhere in the driver).
 */
static u32
bnx2_get_link(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);

        return bp->link_up;
}
6978
6979 static int
6980 bnx2_get_eeprom_len(struct net_device *dev)
6981 {
6982         struct bnx2 *bp = netdev_priv(dev);
6983
6984         if (bp->flash_info == NULL)
6985                 return 0;
6986
6987         return (int) bp->flash_size;
6988 }
6989
6990 static int
6991 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6992                 u8 *eebuf)
6993 {
6994         struct bnx2 *bp = netdev_priv(dev);
6995         int rc;
6996
6997         if (!netif_running(dev))
6998                 return -EAGAIN;
6999
7000         /* parameters already validated in ethtool_get_eeprom */
7001
7002         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7003
7004         return rc;
7005 }
7006
7007 static int
7008 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7009                 u8 *eebuf)
7010 {
7011         struct bnx2 *bp = netdev_priv(dev);
7012         int rc;
7013
7014         if (!netif_running(dev))
7015                 return -EAGAIN;
7016
7017         /* parameters already validated in ethtool_set_eeprom */
7018
7019         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7020
7021         return rc;
7022 }
7023
7024 static int
7025 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7026 {
7027         struct bnx2 *bp = netdev_priv(dev);
7028
7029         memset(coal, 0, sizeof(struct ethtool_coalesce));
7030
7031         coal->rx_coalesce_usecs = bp->rx_ticks;
7032         coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
7033         coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
7034         coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
7035
7036         coal->tx_coalesce_usecs = bp->tx_ticks;
7037         coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
7038         coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
7039         coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
7040
7041         coal->stats_block_coalesce_usecs = bp->stats_ticks;
7042
7043         return 0;
7044 }
7045
7046 static int
7047 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7048 {
7049         struct bnx2 *bp = netdev_priv(dev);
7050
7051         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7052         if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
7053
7054         bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
7055         if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
7056
7057         bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
7058         if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
7059
7060         bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
7061         if (bp->rx_quick_cons_trip_int > 0xff)
7062                 bp->rx_quick_cons_trip_int = 0xff;
7063
7064         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7065         if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
7066
7067         bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
7068         if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
7069
7070         bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
7071         if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
7072
7073         bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
7074         if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
7075                 0xff;
7076
7077         bp->stats_ticks = coal->stats_block_coalesce_usecs;
7078         if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7079                 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7080                         bp->stats_ticks = USEC_PER_SEC;
7081         }
7082         if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7083                 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7084         bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7085
7086         if (netif_running(bp->dev)) {
7087                 bnx2_netif_stop(bp, true);
7088                 bnx2_init_nic(bp, 0);
7089                 bnx2_netif_start(bp, true);
7090         }
7091
7092         return 0;
7093 }
7094
7095 static void
7096 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7097 {
7098         struct bnx2 *bp = netdev_priv(dev);
7099
7100         ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
7101         ering->rx_mini_max_pending = 0;
7102         ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
7103
7104         ering->rx_pending = bp->rx_ring_size;
7105         ering->rx_mini_pending = 0;
7106         ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7107
7108         ering->tx_max_pending = MAX_TX_DESC_CNT;
7109         ering->tx_pending = bp->tx_ring_size;
7110 }
7111
/* Resize the rx/tx rings.  If the device is running it is torn down
 * (stats saved first, since the chip reset clears them), the rings are
 * resized, and the device is brought back up.  On re-init failure the
 * device is closed and the error returned.
 */
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
{
        if (netif_running(bp->dev)) {
                /* Reset will erase chipset stats; save them */
                bnx2_save_stats(bp);

                bnx2_netif_stop(bp, true);
                bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
                bnx2_free_skbs(bp);
                bnx2_free_mem(bp);
        }

        bnx2_set_rx_ring_size(bp, rx);
        bp->tx_ring_size = tx;

        if (netif_running(bp->dev)) {
                int rc;

                rc = bnx2_alloc_mem(bp);
                if (!rc)
                        rc = bnx2_init_nic(bp, 0);

                if (rc) {
                        /* Re-enable NAPI so dev_close() can complete,
                         * then take the interface down.
                         */
                        bnx2_napi_enable(bp);
                        dev_close(bp->dev);
                        return rc;
                }
#ifdef BCM_CNIC
                mutex_lock(&bp->cnic_lock);
                /* Let cnic know about the new status block. */
                if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
                        bnx2_setup_cnic_irq_info(bp);
                mutex_unlock(&bp->cnic_lock);
#endif
                bnx2_netif_start(bp, true);
        }
        return 0;
}
7151
7152 static int
7153 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7154 {
7155         struct bnx2 *bp = netdev_priv(dev);
7156         int rc;
7157
7158         if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
7159                 (ering->tx_pending > MAX_TX_DESC_CNT) ||
7160                 (ering->tx_pending <= MAX_SKB_FRAGS)) {
7161
7162                 return -EINVAL;
7163         }
7164         rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
7165         return rc;
7166 }
7167
7168 static void
7169 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7170 {
7171         struct bnx2 *bp = netdev_priv(dev);
7172
7173         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7174         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7175         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7176 }
7177
7178 static int
7179 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7180 {
7181         struct bnx2 *bp = netdev_priv(dev);
7182
7183         bp->req_flow_ctrl = 0;
7184         if (epause->rx_pause)
7185                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
7186         if (epause->tx_pause)
7187                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
7188
7189         if (epause->autoneg) {
7190                 bp->autoneg |= AUTONEG_FLOW_CTRL;
7191         }
7192         else {
7193                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7194         }
7195
7196         if (netif_running(dev)) {
7197                 spin_lock_bh(&bp->phy_lock);
7198                 bnx2_setup_phy(bp, bp->phy_port);
7199                 spin_unlock_bh(&bp->phy_lock);
7200         }
7201
7202         return 0;
7203 }
7204
/* ethtool get_rx_csum callback: report whether rx checksum offload is
 * enabled.
 */
static u32
bnx2_get_rx_csum(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);

        return bp->rx_csum;
}
7212
/* ethtool set_rx_csum callback: record the rx checksum offload setting;
 * the flag is consumed by the rx path elsewhere in the driver.
 */
static int
bnx2_set_rx_csum(struct net_device *dev, u32 data)
{
        struct bnx2 *bp = netdev_priv(dev);

        bp->rx_csum = data;
        return 0;
}
7221
7222 static int
7223 bnx2_set_tso(struct net_device *dev, u32 data)
7224 {
7225         struct bnx2 *bp = netdev_priv(dev);
7226
7227         if (data) {
7228                 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
7229                 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7230                         dev->features |= NETIF_F_TSO6;
7231         } else
7232                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
7233                                    NETIF_F_TSO_ECN);
7234         return 0;
7235 }
7236
/* ethtool statistics names.  Order must match bnx2_stats_offset_arr and
 * the bnx2_57xx_stats_len_arr tables below entry for entry.
 */
static struct {
        char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[] = {
        { "rx_bytes" },
        { "rx_error_bytes" },
        { "tx_bytes" },
        { "tx_error_bytes" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_mac_errors" },
        { "tx_carrier_errors" },
        { "rx_crc_errors" },
        { "rx_align_errors" },
        { "tx_single_collisions" },
        { "tx_multi_collisions" },
        { "tx_deferred" },
        { "tx_excess_collisions" },
        { "tx_late_collisions" },
        { "tx_total_collisions" },
        { "rx_fragments" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_oversize_packets" },
        { "rx_64_byte_packets" },
        { "rx_65_to_127_byte_packets" },
        { "rx_128_to_255_byte_packets" },
        { "rx_256_to_511_byte_packets" },
        { "rx_512_to_1023_byte_packets" },
        { "rx_1024_to_1522_byte_packets" },
        { "rx_1523_to_9022_byte_packets" },
        { "tx_64_byte_packets" },
        { "tx_65_to_127_byte_packets" },
        { "tx_128_to_255_byte_packets" },
        { "tx_256_to_511_byte_packets" },
        { "tx_512_to_1023_byte_packets" },
        { "tx_1024_to_1522_byte_packets" },
        { "tx_1523_to_9022_byte_packets" },
        { "rx_xon_frames" },
        { "rx_xoff_frames" },
        { "tx_xon_frames" },
        { "tx_xoff_frames" },
        { "rx_mac_ctrl_frames" },
        { "rx_filtered_packets" },
        { "rx_ftq_discards" },
        { "rx_discards" },
        { "rx_fw_discards" },
};
7288
/* Number of ethtool statistics, derived from the string table with the
 * standard ARRAY_SIZE() helper so the count can never drift from the
 * table itself.
 */
#define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)

/* Word (u32) index of a named field within struct statistics_block. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
7293
/* u32 word offsets into struct statistics_block, one per entry of
 * bnx2_stats_str_arr (same order).  Entries named *_hi point at the
 * high word of a two-word counter; the per-chip len arrays below say
 * whether each counter is read as 4 or 8 bytes.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInFTQDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
7343
7344 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
7345  * skipped because of errata.
7346  */
/* Per-counter read width (bytes) on 5706/5708-A0 class chips: 8 for
 * two-word counters, 4 for single-word, 0 to skip the counter entirely
 * (reported as 0 by bnx2_get_ethtool_stats).
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
        8,0,8,8,8,8,8,8,8,8,
        4,0,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,
};
7354
/* Per-counter read width for the other chips; only stat_IfHCInBadOctets
 * (index 1) is skipped here.
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
        8,0,8,8,8,8,8,8,8,8,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,
};
7362
/* Number of self tests; must match bnx2_tests_str_arr and the buf[]
 * slots filled in by bnx2_self_test().
 */
#define BNX2_NUM_TESTS 6

/* Self-test names, in the order their results are written to buf[]. */
static struct {
        char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
        { "register_test (offline)" },
        { "memory_test (offline)" },
        { "loopback_test (offline)" },
        { "nvram_test (online)" },
        { "interrupt_test (online)" },
        { "link_test (online)" },
};
7375
7376 static int
7377 bnx2_get_sset_count(struct net_device *dev, int sset)
7378 {
7379         switch (sset) {
7380         case ETH_SS_TEST:
7381                 return BNX2_NUM_TESTS;
7382         case ETH_SS_STATS:
7383                 return BNX2_NUM_STATS;
7384         default:
7385                 return -EOPNOTSUPP;
7386         }
7387 }
7388
/* ethtool self_test callback.  Online tests (nvram, interrupt, link)
 * always run; offline tests (register, memory, loopback) additionally
 * reset the chip and restart it afterwards.  buf[i] is set nonzero for
 * each failed test, matching bnx2_tests_str_arr order.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
        struct bnx2 *bp = netdev_priv(dev);

        /* Make sure the chip is powered up before touching it. */
        bnx2_set_power_state(bp, PCI_D0);

        memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
        if (etest->flags & ETH_TEST_FL_OFFLINE) {
                int i;

                /* Offline tests need exclusive access: stop traffic and
                 * put the chip in diagnostic mode.
                 */
                bnx2_netif_stop(bp, true);
                bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
                bnx2_free_skbs(bp);

                if (bnx2_test_registers(bp) != 0) {
                        buf[0] = 1;
                        etest->flags |= ETH_TEST_FL_FAILED;
                }
                if (bnx2_test_memory(bp) != 0) {
                        buf[1] = 1;
                        etest->flags |= ETH_TEST_FL_FAILED;
                }
                /* Loopback result is a bitmask of failed loopback modes. */
                if ((buf[2] = bnx2_test_loopback(bp)) != 0)
                        etest->flags |= ETH_TEST_FL_FAILED;

                if (!netif_running(bp->dev))
                        bnx2_shutdown_chip(bp);
                else {
                        bnx2_init_nic(bp, 1);
                        bnx2_netif_start(bp, true);
                }

                /* wait for link up */
                for (i = 0; i < 7; i++) {
                        if (bp->link_up)
                                break;
                        msleep_interruptible(1000);
                }
        }

        if (bnx2_test_nvram(bp) != 0) {
                buf[3] = 1;
                etest->flags |= ETH_TEST_FL_FAILED;
        }
        if (bnx2_test_intr(bp) != 0) {
                buf[4] = 1;
                etest->flags |= ETH_TEST_FL_FAILED;
        }

        if (bnx2_test_link(bp) != 0) {
                buf[5] = 1;
                etest->flags |= ETH_TEST_FL_FAILED;

        }
        /* If the interface is down, return to low-power state. */
        if (!netif_running(bp->dev))
                bnx2_set_power_state(bp, PCI_D3hot);
}
7447
7448 static void
7449 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7450 {
7451         switch (stringset) {
7452         case ETH_SS_STATS:
7453                 memcpy(buf, bnx2_stats_str_arr,
7454                         sizeof(bnx2_stats_str_arr));
7455                 break;
7456         case ETH_SS_TEST:
7457                 memcpy(buf, bnx2_tests_str_arr,
7458                         sizeof(bnx2_tests_str_arr));
7459                 break;
7460         }
7461 }
7462
/* ethtool get_ethtool_stats callback.  Each counter is the sum of the
 * live hardware stats block and temp_stats_blk (values saved across the
 * last reset).  The per-chip len array gives the width of each counter:
 * 0 = skipped (errata), 4 = one u32 word, 8 = hi/lo u32 pair.
 */
static void
bnx2_get_ethtool_stats(struct net_device *dev,
                struct ethtool_stats *stats, u64 *buf)
{
        struct bnx2 *bp = netdev_priv(dev);
        int i;
        u32 *hw_stats = (u32 *) bp->stats_blk;
        u32 *temp_stats = (u32 *) bp->temp_stats_blk;
        u8 *stats_len_arr = NULL;

        /* No stats block until the device has been opened. */
        if (hw_stats == NULL) {
                memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
                return;
        }

        /* Early 5706-class silicon skips extra counters (see errata
         * comment above bnx2_5706_stats_len_arr).
         */
        if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
            (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
            (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
            (CHIP_ID(bp) == CHIP_ID_5708_A0))
                stats_len_arr = bnx2_5706_stats_len_arr;
        else
                stats_len_arr = bnx2_5708_stats_len_arr;

        for (i = 0; i < BNX2_NUM_STATS; i++) {
                unsigned long offset;

                if (stats_len_arr[i] == 0) {
                        /* skip this counter */
                        buf[i] = 0;
                        continue;
                }

                offset = bnx2_stats_offset_arr[i];
                if (stats_len_arr[i] == 4) {
                        /* 4-byte counter */
                        buf[i] = (u64) *(hw_stats + offset) +
                                 *(temp_stats + offset);
                        continue;
                }
                /* 8-byte counter: offset points at the high word,
                 * offset+1 at the low word; combine both sources.
                 */
                buf[i] = (((u64) *(hw_stats + offset)) << 32) +
                         *(hw_stats + offset + 1) +
                         (((u64) *(temp_stats + offset)) << 32) +
                         *(temp_stats + offset + 1);
        }
}
7509
/* ethtool phys_id callback: blink the port LED for `data` seconds
 * (default 2) to identify the adapter, then restore the original LED
 * configuration.  Interruptible by a pending signal.
 */
static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
        struct bnx2 *bp = netdev_priv(dev);
        int i;
        u32 save;

        /* LED registers require the chip to be powered up. */
        bnx2_set_power_state(bp, PCI_D0);

        if (data == 0)
                data = 2;

        /* Save the LED mode so it can be restored afterwards. */
        save = REG_RD(bp, BNX2_MISC_CFG);
        REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

        /* Two half-second phases per second: LEDs off, then all on. */
        for (i = 0; i < (data * 2); i++) {
                if ((i % 2) == 0) {
                        REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
                }
                else {
                        REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
                                BNX2_EMAC_LED_1000MB_OVERRIDE |
                                BNX2_EMAC_LED_100MB_OVERRIDE |
                                BNX2_EMAC_LED_10MB_OVERRIDE |
                                BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
                                BNX2_EMAC_LED_TRAFFIC);
                }
                msleep_interruptible(500);
                if (signal_pending(current))
                        break;
        }
        /* Restore normal LED operation. */
        REG_WR(bp, BNX2_EMAC_LED, 0);
        REG_WR(bp, BNX2_MISC_CFG, save);

        if (!netif_running(dev))
                bnx2_set_power_state(bp, PCI_D3hot);

        return 0;
}
7549
7550 static int
7551 bnx2_set_tx_csum(struct net_device *dev, u32 data)
7552 {
7553         struct bnx2 *bp = netdev_priv(dev);
7554
7555         if (CHIP_NUM(bp) == CHIP_NUM_5709)
7556                 return (ethtool_op_set_tx_ipv6_csum(dev, data));
7557         else
7558                 return (ethtool_op_set_tx_csum(dev, data));
7559 }
7560
/* ethtool entry points for this driver: link settings, register/EEPROM
 * dumps, WoL, coalescing, ring/pause parameters, offload toggles,
 * self-test, LED identify and statistics.
 */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= bnx2_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.set_tx_csum		= bnx2_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= bnx2_set_tso,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_sset_count		= bnx2_get_sset_count,
};
7591
7592 /* Called with rtnl_lock */
7593 static int
7594 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7595 {
7596         struct mii_ioctl_data *data = if_mii(ifr);
7597         struct bnx2 *bp = netdev_priv(dev);
7598         int err;
7599
7600         switch(cmd) {
7601         case SIOCGMIIPHY:
7602                 data->phy_id = bp->phy_addr;
7603
7604                 /* fallthru */
7605         case SIOCGMIIREG: {
7606                 u32 mii_regval;
7607
7608                 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7609                         return -EOPNOTSUPP;
7610
7611                 if (!netif_running(dev))
7612                         return -EAGAIN;
7613
7614                 spin_lock_bh(&bp->phy_lock);
7615                 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
7616                 spin_unlock_bh(&bp->phy_lock);
7617
7618                 data->val_out = mii_regval;
7619
7620                 return err;
7621         }
7622
7623         case SIOCSMIIREG:
7624                 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7625                         return -EOPNOTSUPP;
7626
7627                 if (!netif_running(dev))
7628                         return -EAGAIN;
7629
7630                 spin_lock_bh(&bp->phy_lock);
7631                 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
7632                 spin_unlock_bh(&bp->phy_lock);
7633
7634                 return err;
7635
7636         default:
7637                 /* do nothing */
7638                 break;
7639         }
7640         return -EOPNOTSUPP;
7641 }
7642
7643 /* Called with rtnl_lock */
7644 static int
7645 bnx2_change_mac_addr(struct net_device *dev, void *p)
7646 {
7647         struct sockaddr *addr = p;
7648         struct bnx2 *bp = netdev_priv(dev);
7649
7650         if (!is_valid_ether_addr(addr->sa_data))
7651                 return -EINVAL;
7652
7653         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7654         if (netif_running(dev))
7655                 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7656
7657         return 0;
7658 }
7659
7660 /* Called with rtnl_lock */
7661 static int
7662 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7663 {
7664         struct bnx2 *bp = netdev_priv(dev);
7665
7666         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7667                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7668                 return -EINVAL;
7669
7670         dev->mtu = new_mtu;
7671         return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
7672 }
7673
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: invoke every interrupt handler with its IRQ disabled. */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;

	for (i = 0; i < bp->irq_nvecs; i++) {
		struct bnx2_irq *irq = &bp->irq_tbl[i];

		disable_irq(irq->vector);
		irq->handler(irq->vector, &bp->bnx2_napi[i]);
		enable_irq(irq->vector);
	}
}
#endif
7690
7691 static void __devinit
7692 bnx2_get_5709_media(struct bnx2 *bp)
7693 {
7694         u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7695         u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7696         u32 strap;
7697
7698         if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7699                 return;
7700         else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7701                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7702                 return;
7703         }
7704
7705         if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7706                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7707         else
7708                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7709
7710         if (PCI_FUNC(bp->pdev->devfn) == 0) {
7711                 switch (strap) {
7712                 case 0x4:
7713                 case 0x5:
7714                 case 0x6:
7715                         bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7716                         return;
7717                 }
7718         } else {
7719                 switch (strap) {
7720                 case 0x1:
7721                 case 0x2:
7722                 case 0x4:
7723                         bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7724                         return;
7725                 }
7726         }
7727 }
7728
/* Detect the bus type (PCI vs. PCI-X), bus width and clock speed from the
 * MISC_STATUS and PCI_CLOCK_CONTROL registers, recording the results in
 * bp->flags (BNX2_FLAG_PCIX / BNX2_FLAG_PCI_32BIT) and bp->bus_speed_mhz.
 */
static void __devinit
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= BNX2_FLAG_PCIX;

		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		/* Map the detected clock-speed field to a nominal MHz. */
		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		/* Conventional PCI: only the 33/66 MHz distinction exists. */
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= BNX2_FLAG_PCI_32BIT;

}
7780
/* Read the VPD (Vital Product Data) image from NVRAM and, if it contains
 * a well-formed read-only section with the expected manufacturer ID,
 * copy the vendor firmware-version keyword into bp->fw_version.
 * All failures are silent: the version string is simply left unset.
 */
static void __devinit
bnx2_read_vpd_fw_ver(struct bnx2 *bp)
{
	int rc, i, j;
	u8 *data;
	unsigned int block_end, rosize, len;

#define BNX2_VPD_NVRAM_OFFSET	0x300
#define BNX2_VPD_LEN		128
#define BNX2_MAX_VER_SLEN	30

	/* One buffer, two halves: raw NVRAM image in the upper 128 bytes,
	 * byte-swapped copy built in the lower 128 bytes.
	 */
	data = kmalloc(256, GFP_KERNEL);
	if (!data)
		return;

	rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
			     BNX2_VPD_LEN);
	if (rc)
		goto vpd_done;

	/* Reverse the bytes of each 32-bit word from the raw copy into the
	 * lower half, yielding the byte order the VPD parser expects.
	 */
	for (i = 0; i < BNX2_VPD_LEN; i += 4) {
		data[i] = data[i + BNX2_VPD_LEN + 3];
		data[i + 1] = data[i + BNX2_VPD_LEN + 2];
		data[i + 2] = data[i + BNX2_VPD_LEN + 1];
		data[i + 3] = data[i + BNX2_VPD_LEN];
	}

	/* Locate the large-resource read-only section and bound-check it. */
	i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto vpd_done;

	rosize = pci_vpd_lrdt_size(&data[i]);
	i += PCI_VPD_LRDT_TAG_SIZE;
	block_end = i + rosize;

	if (block_end > BNX2_VPD_LEN)
		goto vpd_done;

	/* Only proceed if the MFR_ID keyword is exactly "1028"
	 * (presumably a specific OEM id -- verify against board docs).
	 */
	j = pci_vpd_find_info_keyword(data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j < 0)
		goto vpd_done;

	len = pci_vpd_info_field_size(&data[j]);

	j += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (j + len > block_end || len != 4 ||
	    memcmp(&data[j], "1028", 4))
		goto vpd_done;

	/* The VENDOR0 ("V0") keyword carries the version string. */
	j = pci_vpd_find_info_keyword(data, i, rosize,
				      PCI_VPD_RO_KEYWORD_VENDOR0);
	if (j < 0)
		goto vpd_done;

	len = pci_vpd_info_field_size(&data[j]);

	j += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
		goto vpd_done;

	/* Copy the version; the trailing space separates it from the
	 * bootcode version that bnx2_init_board() appends afterwards.
	 */
	memcpy(bp->fw_version, &data[j], len);
	bp->fw_version[len] = ' ';

vpd_done:
	kfree(data);
}
7848
7849 static int __devinit
7850 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7851 {
7852         struct bnx2 *bp;
7853         unsigned long mem_len;
7854         int rc, i, j;
7855         u32 reg;
7856         u64 dma_mask, persist_dma_mask;
7857
7858         SET_NETDEV_DEV(dev, &pdev->dev);
7859         bp = netdev_priv(dev);
7860
7861         bp->flags = 0;
7862         bp->phy_flags = 0;
7863
7864         bp->temp_stats_blk =
7865                 kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
7866
7867         if (bp->temp_stats_blk == NULL) {
7868                 rc = -ENOMEM;
7869                 goto err_out;
7870         }
7871
7872         /* enable device (incl. PCI PM wakeup), and bus-mastering */
7873         rc = pci_enable_device(pdev);
7874         if (rc) {
7875                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
7876                 goto err_out;
7877         }
7878
7879         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7880                 dev_err(&pdev->dev,
7881                         "Cannot find PCI device base address, aborting\n");
7882                 rc = -ENODEV;
7883                 goto err_out_disable;
7884         }
7885
7886         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7887         if (rc) {
7888                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
7889                 goto err_out_disable;
7890         }
7891
7892         pci_set_master(pdev);
7893         pci_save_state(pdev);
7894
7895         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7896         if (bp->pm_cap == 0) {
7897                 dev_err(&pdev->dev,
7898                         "Cannot find power management capability, aborting\n");
7899                 rc = -EIO;
7900                 goto err_out_release;
7901         }
7902
7903         bp->dev = dev;
7904         bp->pdev = pdev;
7905
7906         spin_lock_init(&bp->phy_lock);
7907         spin_lock_init(&bp->indirect_lock);
7908 #ifdef BCM_CNIC
7909         mutex_init(&bp->cnic_lock);
7910 #endif
7911         INIT_WORK(&bp->reset_task, bnx2_reset_task);
7912
7913         dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7914         mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1);
7915         dev->mem_end = dev->mem_start + mem_len;
7916         dev->irq = pdev->irq;
7917
7918         bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7919
7920         if (!bp->regview) {
7921                 dev_err(&pdev->dev, "Cannot map register space, aborting\n");
7922                 rc = -ENOMEM;
7923                 goto err_out_release;
7924         }
7925
7926         /* Configure byte swap and enable write to the reg_window registers.
7927          * Rely on CPU to do target byte swapping on big endian systems
7928          * The chip's target access swapping will not swap all accesses
7929          */
7930         pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7931                                BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7932                                BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7933
7934         bnx2_set_power_state(bp, PCI_D0);
7935
7936         bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7937
7938         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7939                 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7940                         dev_err(&pdev->dev,
7941                                 "Cannot find PCIE capability, aborting\n");
7942                         rc = -EIO;
7943                         goto err_out_unmap;
7944                 }
7945                 bp->flags |= BNX2_FLAG_PCIE;
7946                 if (CHIP_REV(bp) == CHIP_REV_Ax)
7947                         bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7948         } else {
7949                 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7950                 if (bp->pcix_cap == 0) {
7951                         dev_err(&pdev->dev,
7952                                 "Cannot find PCIX capability, aborting\n");
7953                         rc = -EIO;
7954                         goto err_out_unmap;
7955                 }
7956                 bp->flags |= BNX2_FLAG_BROKEN_STATS;
7957         }
7958
7959         if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7960                 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7961                         bp->flags |= BNX2_FLAG_MSIX_CAP;
7962         }
7963
7964         if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7965                 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
7966                         bp->flags |= BNX2_FLAG_MSI_CAP;
7967         }
7968
7969         /* 5708 cannot support DMA addresses > 40-bit.  */
7970         if (CHIP_NUM(bp) == CHIP_NUM_5708)
7971                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
7972         else
7973                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
7974
7975         /* Configure DMA attributes. */
7976         if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7977                 dev->features |= NETIF_F_HIGHDMA;
7978                 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7979                 if (rc) {
7980                         dev_err(&pdev->dev,
7981                                 "pci_set_consistent_dma_mask failed, aborting\n");
7982                         goto err_out_unmap;
7983                 }
7984         } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
7985                 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
7986                 goto err_out_unmap;
7987         }
7988
7989         if (!(bp->flags & BNX2_FLAG_PCIE))
7990                 bnx2_get_pci_speed(bp);
7991
7992         /* 5706A0 may falsely detect SERR and PERR. */
7993         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7994                 reg = REG_RD(bp, PCI_COMMAND);
7995                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7996                 REG_WR(bp, PCI_COMMAND, reg);
7997         }
7998         else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
7999                 !(bp->flags & BNX2_FLAG_PCIX)) {
8000
8001                 dev_err(&pdev->dev,
8002                         "5706 A1 can only be used in a PCIX bus, aborting\n");
8003                 goto err_out_unmap;
8004         }
8005
8006         bnx2_init_nvram(bp);
8007
8008         reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
8009
8010         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
8011             BNX2_SHM_HDR_SIGNATURE_SIG) {
8012                 u32 off = PCI_FUNC(pdev->devfn) << 2;
8013
8014                 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
8015         } else
8016                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
8017
8018         /* Get the permanent MAC address.  First we need to make sure the
8019          * firmware is actually running.
8020          */
8021         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
8022
8023         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
8024             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
8025                 dev_err(&pdev->dev, "Firmware not running, aborting\n");
8026                 rc = -ENODEV;
8027                 goto err_out_unmap;
8028         }
8029
8030         bnx2_read_vpd_fw_ver(bp);
8031
8032         j = strlen(bp->fw_version);
8033         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
8034         for (i = 0; i < 3 && j < 24; i++) {
8035                 u8 num, k, skip0;
8036
8037                 if (i == 0) {
8038                         bp->fw_version[j++] = 'b';
8039                         bp->fw_version[j++] = 'c';
8040                         bp->fw_version[j++] = ' ';
8041                 }
8042                 num = (u8) (reg >> (24 - (i * 8)));
8043                 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8044                         if (num >= k || !skip0 || k == 1) {
8045                                 bp->fw_version[j++] = (num / k) + '0';
8046                                 skip0 = 0;
8047                         }
8048                 }
8049                 if (i != 2)
8050                         bp->fw_version[j++] = '.';
8051         }
8052         reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
8053         if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8054                 bp->wol = 1;
8055
8056         if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8057                 bp->flags |= BNX2_FLAG_ASF_ENABLE;
8058
8059                 for (i = 0; i < 30; i++) {
8060                         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8061                         if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8062                                 break;
8063                         msleep(10);
8064                 }
8065         }
8066         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8067         reg &= BNX2_CONDITION_MFW_RUN_MASK;
8068         if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8069             reg != BNX2_CONDITION_MFW_RUN_NONE) {
8070                 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8071
8072                 if (j < 32)
8073                         bp->fw_version[j++] = ' ';
8074                 for (i = 0; i < 3 && j < 28; i++) {
8075                         reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8076                         reg = swab32(reg);
8077                         memcpy(&bp->fw_version[j], &reg, 4);
8078                         j += 4;
8079                 }
8080         }
8081
8082         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8083         bp->mac_addr[0] = (u8) (reg >> 8);
8084         bp->mac_addr[1] = (u8) reg;
8085
8086         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8087         bp->mac_addr[2] = (u8) (reg >> 24);
8088         bp->mac_addr[3] = (u8) (reg >> 16);
8089         bp->mac_addr[4] = (u8) (reg >> 8);
8090         bp->mac_addr[5] = (u8) reg;
8091
8092         bp->tx_ring_size = MAX_TX_DESC_CNT;
8093         bnx2_set_rx_ring_size(bp, 255);
8094
8095         bp->rx_csum = 1;
8096
8097         bp->tx_quick_cons_trip_int = 2;
8098         bp->tx_quick_cons_trip = 20;
8099         bp->tx_ticks_int = 18;
8100         bp->tx_ticks = 80;
8101
8102         bp->rx_quick_cons_trip_int = 2;
8103         bp->rx_quick_cons_trip = 12;
8104         bp->rx_ticks_int = 18;
8105         bp->rx_ticks = 18;
8106
8107         bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8108
8109         bp->current_interval = BNX2_TIMER_INTERVAL;
8110
8111         bp->phy_addr = 1;
8112
8113         /* Disable WOL support if we are running on a SERDES chip. */
8114         if (CHIP_NUM(bp) == CHIP_NUM_5709)
8115                 bnx2_get_5709_media(bp);
8116         else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
8117                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8118
8119         bp->phy_port = PORT_TP;
8120         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8121                 bp->phy_port = PORT_FIBRE;
8122                 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
8123                 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8124                         bp->flags |= BNX2_FLAG_NO_WOL;
8125                         bp->wol = 0;
8126                 }
8127                 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
8128                         /* Don't do parallel detect on this board because of
8129                          * some board problems.  The link will not go down
8130                          * if we do parallel detect.
8131                          */
8132                         if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8133                             pdev->subsystem_device == 0x310c)
8134                                 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8135                 } else {
8136                         bp->phy_addr = 2;
8137                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8138                                 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8139                 }
8140         } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
8141                    CHIP_NUM(bp) == CHIP_NUM_5708)
8142                 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8143         else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
8144                  (CHIP_REV(bp) == CHIP_REV_Ax ||
8145                   CHIP_REV(bp) == CHIP_REV_Bx))
8146                 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8147
8148         bnx2_init_fw_cap(bp);
8149
8150         if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
8151             (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
8152             (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
8153             !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8154                 bp->flags |= BNX2_FLAG_NO_WOL;
8155                 bp->wol = 0;
8156         }
8157
8158         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
8159                 bp->tx_quick_cons_trip_int =
8160                         bp->tx_quick_cons_trip;
8161                 bp->tx_ticks_int = bp->tx_ticks;
8162                 bp->rx_quick_cons_trip_int =
8163                         bp->rx_quick_cons_trip;
8164                 bp->rx_ticks_int = bp->rx_ticks;
8165                 bp->comp_prod_trip_int = bp->comp_prod_trip;
8166                 bp->com_ticks_int = bp->com_ticks;
8167                 bp->cmd_ticks_int = bp->cmd_ticks;
8168         }
8169
8170         /* Disable MSI on 5706 if AMD 8132 bridge is found.
8171          *
8172          * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
8173          * with byte enables disabled on the unused 32-bit word.  This is legal
8174          * but causes problems on the AMD 8132 which will eventually stop
8175          * responding after a while.
8176          *
8177          * AMD believes this incompatibility is unique to the 5706, and
8178          * prefers to locally disable MSI rather than globally disabling it.
8179          */
8180         if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
8181                 struct pci_dev *amd_8132 = NULL;
8182
8183                 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8184                                                   PCI_DEVICE_ID_AMD_8132_BRIDGE,
8185                                                   amd_8132))) {
8186
8187                         if (amd_8132->revision >= 0x10 &&
8188                             amd_8132->revision <= 0x13) {
8189                                 disable_msi = 1;
8190                                 pci_dev_put(amd_8132);
8191                                 break;
8192                         }
8193                 }
8194         }
8195
8196         bnx2_set_default_link(bp);
8197         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8198
8199         init_timer(&bp->timer);
8200         bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
8201         bp->timer.data = (unsigned long) bp;
8202         bp->timer.function = bnx2_timer;
8203
8204         return 0;
8205
8206 err_out_unmap:
8207         if (bp->regview) {
8208                 iounmap(bp->regview);
8209                 bp->regview = NULL;
8210         }
8211
8212 err_out_release:
8213         pci_release_regions(pdev);
8214
8215 err_out_disable:
8216         pci_disable_device(pdev);
8217         pci_set_drvdata(pdev, NULL);
8218
8219 err_out:
8220         return rc;
8221 }
8222
8223 static char * __devinit
8224 bnx2_bus_string(struct bnx2 *bp, char *str)
8225 {
8226         char *s = str;
8227
8228         if (bp->flags & BNX2_FLAG_PCIE) {
8229                 s += sprintf(s, "PCI Express");
8230         } else {
8231                 s += sprintf(s, "PCI");
8232                 if (bp->flags & BNX2_FLAG_PCIX)
8233                         s += sprintf(s, "-X");
8234                 if (bp->flags & BNX2_FLAG_PCI_32BIT)
8235                         s += sprintf(s, " 32-bit");
8236                 else
8237                         s += sprintf(s, " 64-bit");
8238                 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8239         }
8240         return str;
8241 }
8242
8243 static void
8244 bnx2_del_napi(struct bnx2 *bp)
8245 {
8246         int i;
8247
8248         for (i = 0; i < bp->irq_nvecs; i++)
8249                 netif_napi_del(&bp->bnx2_napi[i].napi);
8250 }
8251
8252 static void
8253 bnx2_init_napi(struct bnx2 *bp)
8254 {
8255         int i;
8256
8257         for (i = 0; i < bp->irq_nvecs; i++) {
8258                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8259                 int (*poll)(struct napi_struct *, int);
8260
8261                 if (i == 0)
8262                         poll = bnx2_poll;
8263                 else
8264                         poll = bnx2_poll_msix;
8265
8266                 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
8267                 bnapi->bp = bp;
8268         }
8269 }
8270
/* net_device callbacks; installed on the netdev in bnx2_init_one(). */
static const struct net_device_ops bnx2_netdev_ops = {
	.ndo_open		= bnx2_open,
	.ndo_start_xmit		= bnx2_start_xmit,
	.ndo_stop		= bnx2_close,
	.ndo_get_stats		= bnx2_get_stats,
	.ndo_set_rx_mode	= bnx2_set_rx_mode,
	.ndo_do_ioctl		= bnx2_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnx2_change_mac_addr,
	.ndo_change_mtu		= bnx2_change_mtu,
	.ndo_tx_timeout		= bnx2_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2,
#endif
};
8289
/* Mirror the given feature flags into vlan_features when VLAN support is
 * compiled in; a no-op otherwise.
 * (Fix: "static void inline" placed `inline` after the type, which GCC
 * flags as an old-style declaration; the idiomatic order is used here.)
 */
static inline void vlan_features_add(struct net_device *dev, unsigned long flags)
{
#ifdef BCM_VLAN
	dev->vlan_features |= flags;
#endif
}
8296
/* PCI probe callback: allocate, initialize and register one NX2 netdev.
 *
 * @pdev: the PCI function being probed
 * @ent:  matching entry from bnx2_pci_tbl (used for the board name)
 *
 * Returns 0 on success or a negative errno.  On failure everything
 * acquired here and in bnx2_init_board() (firmware, BAR mapping, PCI
 * regions, the netdev itself) is released before returning.
 */
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;	/* print the version banner only once */
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc;
	char str[40];			/* scratch for bnx2_bus_string() */

	if (version_printed++ == 0)
		pr_info("%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);

	if (!dev)
		return -ENOMEM;

	/* Maps the BAR, reads the MAC address, sets up bp->*; on failure
	 * it has already undone its own PCI setup, so only the netdev
	 * needs freeing here.
	 */
	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	dev->netdev_ops = &bnx2_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->ethtool_ops = &bnx2_ethtool_ops;

	bp = netdev_priv(dev);

	pci_set_drvdata(pdev, dev);

	/* Load MIPS/RV2P firmware images via request_firmware(). */
	rc = bnx2_request_firmware(bp);
	if (rc)
		goto error;

	/* 6 == ETH_ALEN; copy the permanent MAC read from NVRAM. */
	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);

	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_GRO;
	vlan_features_add(dev, NETIF_F_IP_CSUM | NETIF_F_SG);
	/* Only the 5709 can offload IPv6 checksums. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		dev->features |= NETIF_F_IPV6_CSUM;
		vlan_features_add(dev, NETIF_F_IPV6_CSUM);
	}
#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	vlan_features_add(dev, NETIF_F_TSO | NETIF_F_TSO_ECN);
	/* TSO over IPv6 likewise requires the 5709. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		dev->features |= NETIF_F_TSO6;
		vlan_features_add(dev, NETIF_F_TSO6);
	}
	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto error;
	}

	netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, node addr %pM\n",
		    board_info[ent->driver_data].name,
		    ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		    ((CHIP_ID(bp) & 0x0ff0) >> 4),
		    bnx2_bus_string(bp, str),
		    dev->base_addr,
		    bp->pdev->irq, dev->dev_addr);

	return 0;

error:
	/* Unwind everything bnx2_init_board() and this function set up. */
	if (bp->mips_firmware)
		release_firmware(bp->mips_firmware);
	if (bp->rv2p_firmware)
		release_firmware(bp->rv2p_firmware);

	if (bp->regview)
		iounmap(bp->regview);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
	return rc;
}
8380
/* PCI remove callback: tear the device down in roughly the reverse
 * order of bnx2_init_one().  Any queued driver work is flushed first
 * so no work item can run against a half-destroyed device.
 */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* Wait for any pending driver work items to finish. */
	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->mips_firmware)
		release_firmware(bp->mips_firmware);
	if (bp->rv2p_firmware)
		release_firmware(bp->rv2p_firmware);

	if (bp->regview)
		iounmap(bp->regview);

	kfree(bp->temp_stats_blk);

	/* free_netdev() also frees bp, so it must come after the last
	 * bp-> access above. */
	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
8406
/* Legacy PCI PM suspend hook: quiesce the NIC and drop it into the
 * low-power state chosen from @state.  A device that is not up only
 * needs its config space saved.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;

	/* Stop workers, traffic and the periodic timer before touching
	 * the chip. */
	flush_scheduled_work();
	bnx2_netif_stop(bp, true);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
8430
/* Legacy PCI PM resume hook: restore config space and, if the
 * interface was up at suspend time, power the chip back to D0 and
 * re-initialize it.
 *
 * NOTE(review): the return value of bnx2_init_nic() is ignored here,
 * so a failed re-init is reported as success — confirm whether this
 * is intentional before relying on resume error reporting.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	pci_restore_state(pdev);
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp, 1);
	bnx2_netif_start(bp, true);
	return 0;
}
8447
/**
 * bnx2_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* rtnl_lock serializes against open/close/ioctl paths. */
	rtnl_lock();
	netif_device_detach(dev);

	/* The link is dead for good; nothing to recover. */
	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev)) {
		bnx2_netif_stop(bp, true);
		del_timer_sync(&bp->timer);
		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	}

	pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
8482
/**
 * bnx2_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	/* Re-enable the device that error_detected() disabled. */
	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);
	pci_restore_state(pdev);
	/* Re-save so a subsequent restore (e.g. another reset) works. */
	pci_save_state(pdev);

	if (netif_running(dev)) {
		bnx2_set_power_state(bp, PCI_D0);
		bnx2_init_nic(bp, 1);
	}

	rtnl_unlock();
	return PCI_ERS_RESULT_RECOVERED;
}
8513
/**
 * bnx2_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation.
 */
static void bnx2_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	/* Restart queues/NAPI only if the interface was up. */
	if (netif_running(dev))
		bnx2_netif_start(bp, true);

	netif_device_attach(dev);
	rtnl_unlock();
}
8533
/* AER (PCI error recovery) callbacks wired into the PCI driver below. */
static struct pci_error_handlers bnx2_err_handler = {
	.error_detected	= bnx2_io_error_detected,
	.slot_reset	= bnx2_io_slot_reset,
	.resume		= bnx2_io_resume,
};
8539
/* Top-level PCI driver descriptor: probe/remove, legacy PM hooks and
 * the error-recovery handler table above. */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
	.err_handler	= &bnx2_err_handler,
};
8549
8550 static int __init bnx2_init(void)
8551 {
8552         return pci_register_driver(&bnx2_pci_driver);
8553 }
8554
/* Module exit point: unregister from the PCI core, which invokes
 * bnx2_remove_one() for every bound device. */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
8559
/* Register the entry/exit points with the module loader. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);
8562
8563
8564