]> git.karo-electronics.de Git - mv-sheeva.git/blob - drivers/net/tg3.c
[TG3]: Improve 5704S autoneg.
[mv-sheeva.git] / drivers / net / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/in.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/if_vlan.h>
36 #include <linux/ip.h>
37 #include <linux/tcp.h>
38 #include <linux/workqueue.h>
39 #include <linux/prefetch.h>
40 #include <linux/dma-mapping.h>
41
42 #include <net/checksum.h>
43
44 #include <asm/system.h>
45 #include <asm/io.h>
46 #include <asm/byteorder.h>
47 #include <asm/uaccess.h>
48
49 #ifdef CONFIG_SPARC64
50 #include <asm/idprom.h>
51 #include <asm/oplib.h>
52 #include <asm/pbm.h>
53 #endif
54
55 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
56 #define TG3_VLAN_TAG_USED 1
57 #else
58 #define TG3_VLAN_TAG_USED 0
59 #endif
60
61 #ifdef NETIF_F_TSO
62 #define TG3_TSO_SUPPORT 1
63 #else
64 #define TG3_TSO_SUPPORT 0
65 #endif
66
67 #include "tg3.h"
68
#define DRV_MODULE_NAME		"tg3"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"3.65"
#define DRV_MODULE_RELDATE	"August 07, 2006"

/* Power-on defaults for the MAC/RX/TX mode registers. */
#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
/* Default netif_msg bitmap used when the tg3_debug module parameter is -1. */
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE		512
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JUMBO_RING_SIZE		256
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)	\
	((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

/* Byte sizes of the descriptor rings, derived from the entry counts above. */
#define TG3_RX_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
				   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
/* Ring index advance; relies on TG3_TX_RING_SIZE being a power of two. */
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))

/* RX buffer sizes: max frame + receive offset + slack for alignment. */
#define RX_PKT_BUF_SZ		(1536 + tp->rx_offset + 64)
#define RX_JUMBO_PKT_BUF_SZ	(9046 + tp->rx_offset + 64)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH		(TG3_TX_RING_SIZE / 4)

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS		(sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST		6
138
/* One-line banner printed once at driver load / first probe. */
static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Bitmap of NETIF_MSG_* values selecting which messages are logged. */
static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
150
/* PCI IDs this driver binds to: all Tigon3 variants plus SysKonnect,
 * Altima and Apple boards built around the same silicon.  The table is
 * terminated by the empty sentinel entry required by the PCI core.
 */
static struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
211
/* Names reported for ETHTOOL_GSTRINGS/ETHTOOL_GSTATS.  The order here
 * must stay in sync with the layout of struct tg3_ethtool_stats
 * (TG3_NUM_STATS is derived from that struct) — do not reorder.
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};
292
/* Names for the TG3_NUM_TEST ethtool self-tests, in execution order.
 * "(online)" tests run without disrupting traffic; "(offline)" tests
 * require the interface to be taken down.
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};
303
304 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
305 {
306         writel(val, tp->regs + off);
307 }
308
309 static u32 tg3_read32(struct tg3 *tp, u32 off)
310 {
311         return (readl(tp->regs + off));
312 }
313
/* Write a device register indirectly through PCI configuration space:
 * program the target offset into TG3PCI_REG_BASE_ADDR, then write the
 * value through TG3PCI_REG_DATA.  Used when direct MMIO access is
 * unsafe on this chip/bus.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	/* The base/data register pair is shared state; serialize under
	 * indirect_lock with IRQs off so two accesses cannot interleave.
	 */
	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
323
324 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
325 {
326         writel(val, tp->regs + off);
327         readl(tp->regs + off);
328 }
329
/* Read a device register indirectly through PCI configuration space;
 * counterpart of tg3_write_indirect_reg32().
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	/* Serialize the base-address/data sequence against other
	 * indirect accesses.
	 */
	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
341
/* Write a mailbox register when the device is in indirect-access mode.
 * Two mailboxes (RX return-ring consumer and standard-ring producer)
 * have dedicated shadow registers in config space and are written
 * directly; everything else goes through the indirect register window.
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	/* 0x5600 relocates the mailbox offset into the indirect register
	 * window — NOTE(review): presumably the high-priority mailbox
	 * aperture; confirm against the chip register map.
	 */
	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
371
/* Read a mailbox register through the indirect register window
 * (same 0x5600 relocation as tg3_write_indirect_mbox()).
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
383
384 /* usec_wait specifies the wait time in usec when writing to certain registers
385  * where it is unsafe to read back the register without some delay.
386  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
387  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
388  */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods: tp->write32 is already a
		 * non-posted (e.g. indirect) writer, so no flush read
		 * is needed here.
		 */
		tp->write32(tp, off, val);
	else {
		/* Posted method: write, optionally wait, then read
		 * back to force the write out of the posting buffers.
		 */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
408
/* Write a mailbox register and flush it with a read-back, except on
 * configurations where the read is unnecessary or unsafe (mailbox
 * write reordering handled elsewhere, or the ICH workaround where
 * writes are already non-posted).
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}
416
/* Write a TX mailbox register directly via MMIO, applying two chip
 * workarounds: write the value twice when the TXD mailbox hardware bug
 * is present, and read back when mailbox writes may be reordered by
 * the host bridge.  The statement order is part of the workaround —
 * do not reorder.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}
426
427 #define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
428 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
429 #define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
430 #define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
431 #define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)
432
433 #define tw32(reg,val)           tp->write32(tp, reg, val)
434 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
435 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
436 #define tr32(reg)               tp->read32(tp, reg)
437
/* Write a word into NIC on-board SRAM at offset @off through the
 * memory window (base-address register + data register), using either
 * PCI config space or MMIO depending on TG3_FLAG_SRAM_USE_CONFIG.
 * The window base is restored to zero afterwards in both paths.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	/* The window base/data pair is shared; serialize all SRAM
	 * accesses under indirect_lock.
	 */
	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
458
/* Read a word from NIC on-board SRAM at offset @off into *@val;
 * counterpart of tg3_write_mem().  The window base is restored to
 * zero afterwards in both paths.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
479
/* Disable chip interrupts: mask the PCI interrupt line in the misc
 * host control register, then write 1 to the interrupt mailbox (with
 * flush) to deassert any pending interrupt.
 */
static void tg3_disable_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}
486
487 static inline void tg3_cond_int(struct tg3 *tp)
488 {
489         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
490             (tp->hw_status->status & SD_STATUS_UPDATED))
491                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
492 }
493
/* Re-enable chip interrupts: clear irq_sync (with a write barrier so
 * other CPUs see it before the unmask takes effect), unmask the PCI
 * interrupt, and acknowledge up to the last processed status tag.
 * The 1-shot MSI workaround repeats the mailbox write.  Finally poke
 * the chip in case work arrived while interrupts were off.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		       (tp->last_tag << 24));
	if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       (tp->last_tag << 24));
	tg3_cond_int(tp);
}
508
509 static inline unsigned int tg3_has_work(struct tg3 *tp)
510 {
511         struct tg3_hw_status *sblk = tp->hw_status;
512         unsigned int work_exists = 0;
513
514         /* check for phy events */
515         if (!(tp->tg3_flags &
516               (TG3_FLAG_USE_LINKCHG_REG |
517                TG3_FLAG_POLL_SERDES))) {
518                 if (sblk->status & SD_STATUS_LINK_CHG)
519                         work_exists = 1;
520         }
521         /* check for RX/TX work to do */
522         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
523             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
524                 work_exists = 1;
525
526         return work_exists;
527 }
528
/* tg3_restart_ints
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
	/* Acknowledge up to last_tag; no read-back flush here — mmiowb()
	 * only orders this write against later MMIO from other CPUs.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		     tp->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tp))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
549
/* Quiesce the network interface: refresh trans_start so the watchdog
 * does not fire while we are deliberately stopped, then disable
 * polling (NAPI) before disabling the TX queue.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	netif_poll_disable(tp->dev);
	netif_tx_disable(tp->dev);
}
556
/* Counterpart of tg3_netif_stop(): re-enable TX and polling, mark the
 * status block updated so the next poll sees work, and re-enable
 * interrupts.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
	netif_wake_queue(tp->dev);
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after tg3_init_hw)
	 */
	netif_poll_enable(tp->dev);
	tp->hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
568
/* Switch the chip core clock back to its normal source/speed.  The
 * intermediate ALTCLK writes follow the hardware-mandated sequence;
 * each write uses a 40 usec settle time (tw32_wait_f) because reading
 * CLOCK_CTRL back immediately is unsafe while clocks change.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
	u32 orig_clock_ctrl;

	/* 5780-class chips manage clocks differently; nothing to do. */
	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
		return;

	orig_clock_ctrl = clock_ctrl;
	/* Preserve only the CLKRUN bits and the low divider field. */
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		/* Step down via ALTCLK in two stages before the final
		 * write below restores the target clock_ctrl value.
		 */
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
599
/* Upper bound on MI_COM busy-poll iterations (10 usec each). */
#define PHY_BUSY_LOOPS	5000

/* Read PHY register @reg over the MI (MDIO) interface into *@val.
 * Returns 0 on success, -EBUSY if the MI interface stays busy for the
 * whole poll window.  Autopolling is suspended for the duration of the
 * transaction and restored before returning.
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	/* Autopoll would race with our manual MI transaction; turn it
	 * off first (80 usec settle per hardware requirement).
	 */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	/* Compose the MI command frame: PHY address, register, READ. */
	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll until the BUSY bit clears; re-read once more after a
	 * short delay to pick up the final data value.
	 */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	/* Restore autopolling if we disabled it above. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
650
/* Write @val to PHY register @reg over the MI (MDIO) interface.
 * Returns 0 on success, -EBUSY on MI timeout.  Mirrors tg3_readphy():
 * autopolling is suspended for the transaction and restored after.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	/* Suspend autopoll so it cannot collide with this transaction. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	/* Compose the MI command frame: PHY address, register, data,
	 * WRITE command.
	 */
	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll for completion (BUSY bit clears). */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	/* Restore autopolling if we disabled it above. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
695
/* Enable the PHY's "ethernet@wirespeed" feature (read-modify-write of
 * the AUX control shadow register) unless the chip is flagged as not
 * supporting it.  NOTE(review): 0x7007 selects the relevant AUX_CTRL
 * shadow page and bits 15/4 enable the feature — confirm against the
 * Broadcom PHY register documentation.
 */
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
		return;

	/* Best-effort: silently skip on MI access failure. */
	if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
	    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     (val | (1 << 15) | (1 << 4)));
}
708
709 static int tg3_bmcr_reset(struct tg3 *tp)
710 {
711         u32 phy_control;
712         int limit, err;
713
714         /* OK, reset it, and poll the BMCR_RESET bit until it
715          * clears or we time out.
716          */
717         phy_control = BMCR_RESET;
718         err = tg3_writephy(tp, MII_BMCR, phy_control);
719         if (err != 0)
720                 return -EBUSY;
721
722         limit = 5000;
723         while (limit--) {
724                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
725                 if (err != 0)
726                         return -EBUSY;
727
728                 if ((phy_control & BMCR_RESET) == 0) {
729                         udelay(40);
730                         break;
731                 }
732                 udelay(10);
733         }
734         if (limit <= 0)
735                 return -EBUSY;
736
737         return 0;
738 }
739
740 static int tg3_wait_macro_done(struct tg3 *tp)
741 {
742         int limit = 100;
743
744         while (limit--) {
745                 u32 tmp32;
746
747                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
748                         if ((tmp32 & 0x1000) == 0)
749                                 break;
750                 }
751         }
752         if (limit <= 0)
753                 return -EBUSY;
754
755         return 0;
756 }
757
/* Write a fixed test pattern into each of the four PHY DSP channels,
 * read it back, and verify it.  Returns 0 when all channels verify,
 * -EBUSY on any macro timeout or mismatch; *@resetp is set to 1 when
 * the caller should reset the PHY and retry (timeouts, not mismatches).
 * The exact register/command sequence (0x16 command values 0x0002 /
 * 0x0202 / 0x0082 / 0x0802) is hardware-defined — do not reorder.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select this channel's DSP block (0x2000 stride). */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		/* Load the six pattern words. */
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-select the channel and start the read-back macro. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Read back pairs (low word, high nibble) and compare. */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* Mismatch: issue the DSP recovery writes
				 * (no PHY reset requested) and bail out.
				 */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
823
824 static int tg3_phy_reset_chanpat(struct tg3 *tp)
825 {
826         int chan;
827
828         for (chan = 0; chan < 4; chan++) {
829                 int i;
830
831                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
832                              (chan * 0x2000) | 0x0200);
833                 tg3_writephy(tp, 0x16, 0x0002);
834                 for (i = 0; i < 6; i++)
835                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
836                 tg3_writephy(tp, 0x16, 0x0202);
837                 if (tg3_wait_macro_done(tp))
838                         return -EBUSY;
839         }
840
841         return 0;
842 }
843
844 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
845 {
846         u32 reg32, phy9_orig;
847         int retries, do_phy_reset, err;
848
849         retries = 10;
850         do_phy_reset = 1;
851         do {
852                 if (do_phy_reset) {
853                         err = tg3_bmcr_reset(tp);
854                         if (err)
855                                 return err;
856                         do_phy_reset = 0;
857                 }
858
859                 /* Disable transmitter and interrupt.  */
860                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
861                         continue;
862
863                 reg32 |= 0x3000;
864                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
865
866                 /* Set full-duplex, 1000 mbps.  */
867                 tg3_writephy(tp, MII_BMCR,
868                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
869
870                 /* Set to master mode.  */
871                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
872                         continue;
873
874                 tg3_writephy(tp, MII_TG3_CTRL,
875                              (MII_TG3_CTRL_AS_MASTER |
876                               MII_TG3_CTRL_ENABLE_AS_MASTER));
877
878                 /* Enable SM_DSP_CLOCK and 6dB.  */
879                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
880
881                 /* Block the PHY control access.  */
882                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
883                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
884
885                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
886                 if (!err)
887                         break;
888         } while (--retries);
889
890         err = tg3_phy_reset_chanpat(tp);
891         if (err)
892                 return err;
893
894         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
895         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
896
897         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
898         tg3_writephy(tp, 0x16, 0x0000);
899
900         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
901             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
902                 /* Set Extended packet length bit for jumbo frames */
903                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
904         }
905         else {
906                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
907         }
908
909         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
910
911         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
912                 reg32 &= ~0x3000;
913                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
914         } else if (!err)
915                 err = -EBUSY;
916
917         return err;
918 }
919
920 static void tg3_link_report(struct tg3 *);
921
/* Reset the tigon3 PHY and re-apply the chip-specific DSP workarounds
 * and jumbo-frame settings that a reset clears.  Returns 0 on success
 * or a negative errno.
 * NOTE(review): the old comment here mentioned a FORCE argument that
 * this function does not take.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 phy_status;
	int err;

	/* BMSR link status is latched-low; read twice so the second
	 * read reflects the current state.
	 */
	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	/* The reset will drop any existing link; report it now. */
	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	/* 5703/5704/5705 need the DSP test-pattern reset sequence,
	 * which performs the BMCR reset itself.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

out:
	/* The DSP address/data writes below are opaque vendor-supplied
	 * fixups for known PHY bugs on the flagged chips.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		/* NOTE(review): written twice in the original —
		 * presumably intentional per the vendor workaround.
		 */
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
		    tg3_writephy(tp, MII_TG3_EXT_CTRL,
				 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	tg3_phy_set_wirespeed(tp);
	return 0;
}
1010
/* Drive the GRC local-control GPIOs that switch the NIC's auxiliary
 * (Vaux) power.  On two-port chips (5704/5714) the setting is shared
 * with the peer device, so the peer's WOL/ASF flags are consulted and
 * only one of the two functions performs the switch.  The GPIO writes
 * are staged with delays (tw32_wait_f ... 100) — their order matters.
 */
static void tg3_frob_aux_power(struct tg3 *tp)
{
	struct tg3 *tp_peer = tp;

	/* LOM designs keep this write-protected; nothing to do. */
	if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
		return;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);
		/* remove_one() may have been run on the peer. */
		if (!dev_peer)
			tp_peer = tp;
		else
			tp_peer = netdev_priv(dev_peer);
	}

	/* If either function needs WOL or ASF, keep aux power enabled. */
	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			/* Let the peer that completed init own the GPIOs. */
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				    NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					 GRC_LCLCTRL_GPIO_OE1 |
					 GRC_LCLCTRL_GPIO_OE2 |
					 GRC_LCLCTRL_GPIO_OUTPUT1 |
					 GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		/* Neither function needs aux power; switch it off
		 * (except on 5700/5701, which are left untouched).
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}
1106
1107 static int tg3_setup_phy(struct tg3 *, int);
1108
1109 #define RESET_KIND_SHUTDOWN     0
1110 #define RESET_KIND_INIT         1
1111 #define RESET_KIND_SUSPEND      2
1112
1113 static void tg3_write_sig_post_reset(struct tg3 *, int);
1114 static int tg3_halt_cpu(struct tg3 *, u32);
1115 static int tg3_nvram_lock(struct tg3 *);
1116 static void tg3_nvram_unlock(struct tg3 *);
1117
1118 static void tg3_power_down_phy(struct tg3 *tp)
1119 {
1120         /* The PHY should not be powered down on some chips because
1121          * of bugs.
1122          */
1123         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1124             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1125             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
1126              (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1127                 return;
1128         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1129 }
1130
/* Transition the device into the requested PCI power state.
 *
 * For PCI_D0 the PM control register is written and, for non-LOM
 * boards, Vaux is switched out.  For D1/D2/D3hot the function masks
 * PCI interrupts, saves the current link config, optionally downshifts
 * the link to 10/half, programs the WOL mailbox and MAC for wake-up,
 * gates the clocks, possibly powers down the PHY, frobs aux power,
 * and finally writes the new PM state.
 *
 * Returns 0 on success or -EINVAL for an unsupported state.
 */
static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
{
	u32 misc_host_ctrl;
	u16 power_control, power_caps;
	int pm = tp->pm_cap;	/* offset of the PCI PM capability */

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Read-modify-write the PM control register: clear PME status
	 * and the old state bits.
	 */
	pci_read_config_word(tp->pdev,
			     pm + PCI_PM_CTRL,
			     &power_control);
	power_control |= PCI_PM_CTRL_PME_STATUS;
	power_control &= ~(PCI_PM_CTRL_STATE_MASK);
	switch (state) {
	case PCI_D0:
		power_control |= 0;
		pci_write_config_word(tp->pdev,
				      pm + PCI_PM_CTRL,
				      power_control);
		udelay(100);	/* Delay after power state change */

		/* Switch out of Vaux if it is not a LOM */
		if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);

		return 0;

	case PCI_D1:
		power_control |= 1;
		break;

	case PCI_D2:
		power_control |= 2;
		break;

	case PCI_D3hot:
		power_control |= 3;
		break;

	default:
		printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
		       "requested.\n",
		       tp->dev->name, state);
		return -EINVAL;
	};

	/* Everything below is the low-power (D1/D2/D3hot) path. */
	power_control |= PCI_PM_CTRL_PME_ENABLE;

	/* Mask PCI interrupts while the device is being put to sleep. */
	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	/* Save the link configuration once so it can be restored on
	 * the way back to full power.
	 */
	if (tp->link_config.phy_is_low_power == 0) {
		tp->link_config.phy_is_low_power = 1;
		tp->link_config.orig_speed = tp->link_config.speed;
		tp->link_config.orig_duplex = tp->link_config.duplex;
		tp->link_config.orig_autoneg = tp->link_config.autoneg;
	}

	/* Copper PHYs: drop the link to 10/half to save power. */
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
		tp->link_config.speed = SPEED_10;
		tp->link_config.duplex = DUPLEX_HALF;
		tp->link_config.autoneg = AUTONEG_ENABLE;
		tg3_setup_phy(tp, 0);
	}

	/* Without ASF, wait (up to ~200ms) for firmware to signal
	 * readiness in its status mailbox before shutting down.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		int i;
		u32 val;

		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	/* Tell firmware the driver is shutting down with WOL armed. */
	tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
					     WOL_DRV_STATE_SHUTDOWN |
					     WOL_DRV_WOL | WOL_SET_MAGIC_PKT);

	pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);

	/* Configure the MAC to recognize wake-up packets while asleep. */
	if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
		u32 mac_mode;

		if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
			tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
			udelay(40);

			mac_mode = MAC_MODE_PORT_MODE_MII;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
			    !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
				mac_mode |= MAC_MODE_LINK_POLARITY;
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
		     (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
			mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	/* Gate clocks; the exact bits depend on the chip generation. */
	if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
		/* do nothing */
	} else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
		     (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		/* Two staged writes with settle delays. */
		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		/* Turn off the PHY */
		if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
			tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
			tg3_power_down_phy(tp);
		}
	}

	tg3_frob_aux_power(tp);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
			int err;

			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	/* Finally, set the new power state. */
	pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
	udelay(100);	/* Delay after power state change */

	return 0;
}
1341
1342 static void tg3_link_report(struct tg3 *tp)
1343 {
1344         if (!netif_carrier_ok(tp->dev)) {
1345                 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1346         } else {
1347                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1348                        tp->dev->name,
1349                        (tp->link_config.active_speed == SPEED_1000 ?
1350                         1000 :
1351                         (tp->link_config.active_speed == SPEED_100 ?
1352                          100 : 10)),
1353                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1354                         "full" : "half"));
1355
1356                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1357                        "%s for RX.\n",
1358                        tp->dev->name,
1359                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1360                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1361         }
1362 }
1363
/* Resolve TX/RX flow control from the local and link-partner pause
 * advertisements (standard pause autoneg resolution) and program the
 * MAC RX/TX mode registers if the result changed.  When pause autoneg
 * is disabled, the previously configured flags are applied unchanged.
 */
static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
{
	u32 new_tg3_flags = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {

		/* Convert 1000BaseX flow control bits to 1000BaseT
		 * bits before resolving flow control.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
			local_adv &= ~(ADVERTISE_PAUSE_CAP |
				       ADVERTISE_PAUSE_ASYM);
			remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

			if (local_adv & ADVERTISE_1000XPAUSE)
				local_adv |= ADVERTISE_PAUSE_CAP;
			if (local_adv & ADVERTISE_1000XPSE_ASYM)
				local_adv |= ADVERTISE_PAUSE_ASYM;
			if (remote_adv & LPA_1000XPAUSE)
				remote_adv |= LPA_PAUSE_CAP;
			if (remote_adv & LPA_1000XPAUSE_ASYM)
				remote_adv |= LPA_PAUSE_ASYM;
		}

		/* Pause resolution: symmetric pause on both sides
		 * enables both directions; asymmetric combinations
		 * enable only one direction.
		 */
		if (local_adv & ADVERTISE_PAUSE_CAP) {
			if (local_adv & ADVERTISE_PAUSE_ASYM) {
				if (remote_adv & LPA_PAUSE_CAP)
					new_tg3_flags |=
						(TG3_FLAG_RX_PAUSE |
						TG3_FLAG_TX_PAUSE);
				else if (remote_adv & LPA_PAUSE_ASYM)
					new_tg3_flags |=
						(TG3_FLAG_RX_PAUSE);
			} else {
				if (remote_adv & LPA_PAUSE_CAP)
					new_tg3_flags |=
						(TG3_FLAG_RX_PAUSE |
						TG3_FLAG_TX_PAUSE);
			}
		} else if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if ((remote_adv & LPA_PAUSE_CAP) &&
			(remote_adv & LPA_PAUSE_ASYM))
				new_tg3_flags |= TG3_FLAG_TX_PAUSE;
		}

		tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
		tp->tg3_flags |= new_tg3_flags;
	} else {
		/* Pause autoneg off: keep the flags as configured. */
		new_tg3_flags = tp->tg3_flags;
	}

	/* Apply the result to the MAC, touching hardware only when
	 * the mode actually changed.
	 */
	if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode) {
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode) {
		tw32_f(MAC_TX_MODE, tp->tx_mode);
	}
}
1435
1436 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1437 {
1438         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1439         case MII_TG3_AUX_STAT_10HALF:
1440                 *speed = SPEED_10;
1441                 *duplex = DUPLEX_HALF;
1442                 break;
1443
1444         case MII_TG3_AUX_STAT_10FULL:
1445                 *speed = SPEED_10;
1446                 *duplex = DUPLEX_FULL;
1447                 break;
1448
1449         case MII_TG3_AUX_STAT_100HALF:
1450                 *speed = SPEED_100;
1451                 *duplex = DUPLEX_HALF;
1452                 break;
1453
1454         case MII_TG3_AUX_STAT_100FULL:
1455                 *speed = SPEED_100;
1456                 *duplex = DUPLEX_FULL;
1457                 break;
1458
1459         case MII_TG3_AUX_STAT_1000HALF:
1460                 *speed = SPEED_1000;
1461                 *duplex = DUPLEX_HALF;
1462                 break;
1463
1464         case MII_TG3_AUX_STAT_1000FULL:
1465                 *speed = SPEED_1000;
1466                 *duplex = DUPLEX_FULL;
1467                 break;
1468
1469         default:
1470                 *speed = SPEED_INVALID;
1471                 *duplex = DUPLEX_INVALID;
1472                 break;
1473         };
1474 }
1475
1476 static void tg3_phy_copper_begin(struct tg3 *tp)
1477 {
1478         u32 new_adv;
1479         int i;
1480
1481         if (tp->link_config.phy_is_low_power) {
1482                 /* Entering low power mode.  Disable gigabit and
1483                  * 100baseT advertisements.
1484                  */
1485                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1486
1487                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1488                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1489                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1490                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1491
1492                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1493         } else if (tp->link_config.speed == SPEED_INVALID) {
1494                 tp->link_config.advertising =
1495                         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1496                          ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1497                          ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1498                          ADVERTISED_Autoneg | ADVERTISED_MII);
1499
1500                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1501                         tp->link_config.advertising &=
1502                                 ~(ADVERTISED_1000baseT_Half |
1503                                   ADVERTISED_1000baseT_Full);
1504
1505                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1506                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1507                         new_adv |= ADVERTISE_10HALF;
1508                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1509                         new_adv |= ADVERTISE_10FULL;
1510                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1511                         new_adv |= ADVERTISE_100HALF;
1512                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1513                         new_adv |= ADVERTISE_100FULL;
1514                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1515
1516                 if (tp->link_config.advertising &
1517                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1518                         new_adv = 0;
1519                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1520                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1521                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1522                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1523                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1524                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1525                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1526                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1527                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1528                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1529                 } else {
1530                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1531                 }
1532         } else {
1533                 /* Asking for a specific link mode. */
1534                 if (tp->link_config.speed == SPEED_1000) {
1535                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1536                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1537
1538                         if (tp->link_config.duplex == DUPLEX_FULL)
1539                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1540                         else
1541                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1542                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1543                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1544                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1545                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1546                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1547                 } else {
1548                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1549
1550                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1551                         if (tp->link_config.speed == SPEED_100) {
1552                                 if (tp->link_config.duplex == DUPLEX_FULL)
1553                                         new_adv |= ADVERTISE_100FULL;
1554                                 else
1555                                         new_adv |= ADVERTISE_100HALF;
1556                         } else {
1557                                 if (tp->link_config.duplex == DUPLEX_FULL)
1558                                         new_adv |= ADVERTISE_10FULL;
1559                                 else
1560                                         new_adv |= ADVERTISE_10HALF;
1561                         }
1562                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1563                 }
1564         }
1565
1566         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1567             tp->link_config.speed != SPEED_INVALID) {
1568                 u32 bmcr, orig_bmcr;
1569
1570                 tp->link_config.active_speed = tp->link_config.speed;
1571                 tp->link_config.active_duplex = tp->link_config.duplex;
1572
1573                 bmcr = 0;
1574                 switch (tp->link_config.speed) {
1575                 default:
1576                 case SPEED_10:
1577                         break;
1578
1579                 case SPEED_100:
1580                         bmcr |= BMCR_SPEED100;
1581                         break;
1582
1583                 case SPEED_1000:
1584                         bmcr |= TG3_BMCR_SPEED1000;
1585                         break;
1586                 };
1587
1588                 if (tp->link_config.duplex == DUPLEX_FULL)
1589                         bmcr |= BMCR_FULLDPLX;
1590
1591                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1592                     (bmcr != orig_bmcr)) {
1593                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1594                         for (i = 0; i < 1500; i++) {
1595                                 u32 tmp;
1596
1597                                 udelay(10);
1598                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1599                                     tg3_readphy(tp, MII_BMSR, &tmp))
1600                                         continue;
1601                                 if (!(tmp & BMSR_LSTATUS)) {
1602                                         udelay(40);
1603                                         break;
1604                                 }
1605                         }
1606                         tg3_writephy(tp, MII_BMCR, bmcr);
1607                         udelay(40);
1608                 }
1609         } else {
1610                 tg3_writephy(tp, MII_BMCR,
1611                              BMCR_ANENABLE | BMCR_ANRESTART);
1612         }
1613 }
1614
1615 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1616 {
1617         int err;
1618
1619         /* Turn off tap power management. */
1620         /* Set Extended packet length bit */
1621         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1622
1623         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1624         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1625
1626         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1627         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1628
1629         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1630         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1631
1632         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1633         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1634
1635         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1636         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1637
1638         udelay(40);
1639
1640         return err;
1641 }
1642
1643 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1644 {
1645         u32 adv_reg, all_mask;
1646
1647         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1648                 return 0;
1649
1650         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1651                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1652         if ((adv_reg & all_mask) != all_mask)
1653                 return 0;
1654         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1655                 u32 tg3_ctrl;
1656
1657                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1658                         return 0;
1659
1660                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1661                             MII_TG3_CTRL_ADV_1000_FULL);
1662                 if ((tg3_ctrl & all_mask) != all_mask)
1663                         return 0;
1664         }
1665         return 1;
1666 }
1667
/* Probe and (re)configure the copper PHY link.
 *
 * Reads link state from the PHY, optionally resets it, restarts
 * autonegotiation via tg3_phy_copper_begin() when the link is down or
 * misconfigured, programs MAC_MODE to match the resolved speed/duplex,
 * and toggles the netdev carrier when the link state changed.
 *
 * @force_reset: non-zero forces a PHY reset before probing.
 * Returns 0, or a PHY access error from BCM5401 DSP re-init.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
        int current_link_up;
        u32 bmsr, dummy;
        u16 current_speed;
        u8 current_duplex;
        int i, err;

        tw32(MAC_EVENT, 0);

        /* Ack/clear any latched link attention conditions. */
        tw32_f(MAC_STATUS,
             (MAC_STATUS_SYNC_CHANGED |
              MAC_STATUS_CFG_CHANGED |
              MAC_STATUS_MI_COMPLETION |
              MAC_STATUS_LNKSTATE_CHANGED));
        udelay(40);

        /* Put the MDIO interface back into its base mode. */
        tp->mi_mode = MAC_MI_MODE_BASE;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);

        /* Vendor-specified AUX_CTRL setting (magic value). */
        tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);

        /* Some third-party PHYs need to be reset on link going
         * down.
         */
        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
            netif_carrier_ok(tp->dev)) {
                /* BMSR link status is latched; read twice for the
                 * current state.
                 */
                tg3_readphy(tp, MII_BMSR, &bmsr);
                if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
                    !(bmsr & BMSR_LSTATUS))
                        force_reset = 1;
        }
        if (force_reset)
                tg3_phy_reset(tp);

        if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
                tg3_readphy(tp, MII_BMSR, &bmsr);
                if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
                    !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
                        bmsr = 0;

                if (!(bmsr & BMSR_LSTATUS)) {
                        /* Link is down: reload the 5401 DSP patches,
                         * then wait up to ~10ms for link to return.
                         */
                        err = tg3_init_5401phy_dsp(tp);
                        if (err)
                                return err;

                        tg3_readphy(tp, MII_BMSR, &bmsr);
                        for (i = 0; i < 1000; i++) {
                                udelay(10);
                                if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
                                    (bmsr & BMSR_LSTATUS)) {
                                        udelay(40);
                                        break;
                                }
                        }

                        /* 5401 B0 at gigabit speed may need a second
                         * reset + DSP reload if link didn't come back.
                         */
                        if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
                            !(bmsr & BMSR_LSTATUS) &&
                            tp->link_config.active_speed == SPEED_1000) {
                                err = tg3_phy_reset(tp);
                                if (!err)
                                        err = tg3_init_5401phy_dsp(tp);
                                if (err)
                                        return err;
                        }
                }
        } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
                   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
                /* 5701 {A0,B0} CRC bug workaround */
                tg3_writephy(tp, 0x15, 0x0a75);
                tg3_writephy(tp, 0x1c, 0x8c68);
                tg3_writephy(tp, 0x1c, 0x8d68);
                tg3_writephy(tp, 0x1c, 0x8c68);
        }

        /* Clear pending interrupts... (ISTAT is read-to-clear; two
         * reads flush latched events.)
         */
        tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
        tg3_readphy(tp, MII_TG3_ISTAT, &dummy);

        /* Unmask only the link-change interrupt when using MI
         * interrupts, otherwise mask everything.
         */
        if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
                tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
        else
                tg3_writephy(tp, MII_TG3_IMASK, ~0);

        /* Select LED mode on 5700/5701. */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
                        tg3_writephy(tp, MII_TG3_EXT_CTRL,
                                     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
                else
                        tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
        }

        current_link_up = 0;
        current_speed = SPEED_INVALID;
        current_duplex = DUPLEX_INVALID;

        if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
                u32 val;

                /* Make sure AUX_CTRL bit 10 is set; if we had to set
                 * it, skip the link probe and go restart autoneg.
                 */
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
                tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
                if (!(val & (1 << 10))) {
                        val |= (1 << 10);
                        tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
                        goto relink;
                }
        }

        /* Poll BMSR (latched, so read twice) for up to 100 iterations
         * waiting for link.
         */
        bmsr = 0;
        for (i = 0; i < 100; i++) {
                tg3_readphy(tp, MII_BMSR, &bmsr);
                if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
                    (bmsr & BMSR_LSTATUS))
                        break;
                udelay(40);
        }

        if (bmsr & BMSR_LSTATUS) {
                u32 aux_stat, bmcr;

                /* Wait for AUX_STAT to become non-zero, then decode
                 * the negotiated speed/duplex from it.
                 */
                tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
                for (i = 0; i < 2000; i++) {
                        udelay(10);
                        if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
                            aux_stat)
                                break;
                }

                tg3_aux_stat_to_speed_duplex(tp, aux_stat,
                                             &current_speed,
                                             &current_duplex);

                /* Wait for BMCR to read back a sane value (not 0 or
                 * all-ones-in-15-bits).
                 */
                bmcr = 0;
                for (i = 0; i < 200; i++) {
                        tg3_readphy(tp, MII_BMCR, &bmcr);
                        if (tg3_readphy(tp, MII_BMCR, &bmcr))
                                continue;
                        if (bmcr && bmcr != 0x7fff)
                                break;
                        udelay(10);
                }

                if (tp->link_config.autoneg == AUTONEG_ENABLE) {
                        if (bmcr & BMCR_ANENABLE) {
                                current_link_up = 1;

                                /* Force autoneg restart if we are exiting
                                 * low power mode.
                                 */
                                if (!tg3_copper_is_advertising_all(tp))
                                        current_link_up = 0;
                        } else {
                                current_link_up = 0;
                        }
                } else {
                        /* Forced mode: link is only "up" if the PHY's
                         * current settings match what was requested.
                         */
                        if (!(bmcr & BMCR_ANENABLE) &&
                            tp->link_config.speed == current_speed &&
                            tp->link_config.duplex == current_duplex) {
                                current_link_up = 1;
                        } else {
                                current_link_up = 0;
                        }
                }

                tp->link_config.active_speed = current_speed;
                tp->link_config.active_duplex = current_duplex;
        }

        /* Resolve flow control from the pause bits both sides
         * advertised (full-duplex autoneg only).
         */
        if (current_link_up == 1 &&
            (tp->link_config.active_duplex == DUPLEX_FULL) &&
            (tp->link_config.autoneg == AUTONEG_ENABLE)) {
                u32 local_adv, remote_adv;

                if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
                        local_adv = 0;
                local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

                if (tg3_readphy(tp, MII_LPA, &remote_adv))
                        remote_adv = 0;

                remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

                /* If we are not advertising full pause capability,
                 * something is wrong.  Bring the link down and reconfigure.
                 */
                if (local_adv != ADVERTISE_PAUSE_CAP) {
                        current_link_up = 0;
                } else {
                        tg3_setup_flow_control(tp, local_adv, remote_adv);
                }
        }
relink:
        /* Link down (or waking from low power): restart autoneg, then
         * re-check once in case link came straight back.
         */
        if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
                u32 tmp;

                tg3_phy_copper_begin(tp);

                tg3_readphy(tp, MII_BMSR, &tmp);
                if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
                    (tmp & BMSR_LSTATUS))
                        current_link_up = 1;
        }

        /* Program MAC port mode to match the negotiated speed. */
        tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
        if (current_link_up == 1) {
                if (tp->link_config.active_speed == SPEED_100 ||
                    tp->link_config.active_speed == SPEED_10)
                        tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
                else
                        tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
        } else
                tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

        tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
        if (tp->link_config.active_duplex == DUPLEX_HALF)
                tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

        /* Link LED polarity differs between 5700 and later chips. */
        tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
                if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
                    (current_link_up == 1 &&
                     tp->link_config.active_speed == SPEED_10))
                        tp->mac_mode |= MAC_MODE_LINK_POLARITY;
        } else {
                if (current_link_up == 1)
                        tp->mac_mode |= MAC_MODE_LINK_POLARITY;
        }

        /* ??? Without this setting Netgear GA302T PHY does not
         * ??? send/receive packets...
         */
        if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
            tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
                tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
                /* Polled via timer. */
                tw32_f(MAC_EVENT, 0);
        } else {
                tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
        }
        udelay(40);

        /* 5700 at gigabit on PCI-X / fast PCI: notify firmware via
         * the mailbox after clearing latched status.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
            current_link_up == 1 &&
            tp->link_config.active_speed == SPEED_1000 &&
            ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
             (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
                udelay(120);
                tw32_f(MAC_STATUS,
                     (MAC_STATUS_SYNC_CHANGED |
                      MAC_STATUS_CFG_CHANGED));
                udelay(40);
                tg3_write_mem(tp,
                              NIC_SRAM_FIRMWARE_MBOX,
                              NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
        }

        /* Propagate link state to the net stack and log a report. */
        if (current_link_up != netif_carrier_ok(tp->dev)) {
                if (current_link_up)
                        netif_carrier_on(tp->dev);
                else
                        netif_carrier_off(tp->dev);
                tg3_link_report(tp);
        }

        return 0;
}
1946
/* Software state for the fiber (1000BASE-X) autonegotiation state
 * machine driven by tg3_fiber_aneg_smachine().
 */
struct tg3_fiber_aneginfo {
        int state;              /* current ANEG_STATE_* value */
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

        u32 flags;              /* MR_* control and result bits */
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

        /* Tick counters: cur_time advances once per state-machine
         * step; link_time records when the current phase started.
         */
        unsigned long link_time, cur_time;

        u32 ability_match_cfg;          /* last sampled rx config word */
        int ability_match_count;        /* consecutive identical samples */

        /* Match predicates derived from the received config stream. */
        char ability_match, idle_match, ack_match;

        u32 txconfig, rxconfig;         /* tx/rx config words (ANEG_CFG_* bits) */
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
2004 #define ANEG_OK         0
2005 #define ANEG_DONE       1
2006 #define ANEG_TIMER_ENAB 2
2007 #define ANEG_FAILED     -1
2008
2009 #define ANEG_STATE_SETTLE_TIME  10000
2010
/* Execute one step of the software fiber autonegotiation state
 * machine.  Samples the received config word from the MAC, updates
 * the match predicates in @ap, then advances ap->state.
 *
 * Returns ANEG_OK (keep going), ANEG_TIMER_ENAB (keep going, timer
 * phase active), ANEG_DONE (finished) or ANEG_FAILED.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
                                   struct tg3_fiber_aneginfo *ap)
{
        unsigned long delta;
        u32 rx_cfg_reg;
        int ret;

        /* First call: zero all per-negotiation state. */
        if (ap->state == ANEG_STATE_UNKNOWN) {
                ap->rxconfig = 0;
                ap->link_time = 0;
                ap->cur_time = 0;
                ap->ability_match_cfg = 0;
                ap->ability_match_count = 0;
                ap->ability_match = 0;
                ap->idle_match = 0;
                ap->ack_match = 0;
        }
        ap->cur_time++;

        /* Sample the received config word.  "Ability match" requires
         * the same value on more than one consecutive poll; "ack
         * match" tracks the ACK bit; "idle match" means no config
         * words are being received at all.
         */
        if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
                rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

                if (rx_cfg_reg != ap->ability_match_cfg) {
                        ap->ability_match_cfg = rx_cfg_reg;
                        ap->ability_match = 0;
                        ap->ability_match_count = 0;
                } else {
                        if (++ap->ability_match_count > 1) {
                                ap->ability_match = 1;
                                ap->ability_match_cfg = rx_cfg_reg;
                        }
                }
                if (rx_cfg_reg & ANEG_CFG_ACK)
                        ap->ack_match = 1;
                else
                        ap->ack_match = 0;

                ap->idle_match = 0;
        } else {
                ap->idle_match = 1;
                ap->ability_match_cfg = 0;
                ap->ability_match_count = 0;
                ap->ability_match = 0;
                ap->ack_match = 0;

                rx_cfg_reg = 0;
        }

        ap->rxconfig = rx_cfg_reg;
        ret = ANEG_OK;

        switch(ap->state) {
        case ANEG_STATE_UNKNOWN:
                if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
                        ap->state = ANEG_STATE_AN_ENABLE;

                /* fallthru */
        case ANEG_STATE_AN_ENABLE:
                ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
                if (ap->flags & MR_AN_ENABLE) {
                        ap->link_time = 0;
                        ap->cur_time = 0;
                        ap->ability_match_cfg = 0;
                        ap->ability_match_count = 0;
                        ap->ability_match = 0;
                        ap->idle_match = 0;
                        ap->ack_match = 0;

                        ap->state = ANEG_STATE_RESTART_INIT;
                } else {
                        ap->state = ANEG_STATE_DISABLE_LINK_OK;
                }
                break;

        case ANEG_STATE_RESTART_INIT:
                /* Start transmitting an all-zero config word. */
                ap->link_time = ap->cur_time;
                ap->flags &= ~(MR_NP_LOADED);
                ap->txconfig = 0;
                tw32(MAC_TX_AUTO_NEG, 0);
                tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ret = ANEG_TIMER_ENAB;
                ap->state = ANEG_STATE_RESTART;

                /* fallthru */
        case ANEG_STATE_RESTART:
                /* Hold the restart phase for the settle time. */
                delta = ap->cur_time - ap->link_time;
                if (delta > ANEG_STATE_SETTLE_TIME) {
                        ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
                } else {
                        ret = ANEG_TIMER_ENAB;
                }
                break;

        case ANEG_STATE_DISABLE_LINK_OK:
                ret = ANEG_DONE;
                break;

        case ANEG_STATE_ABILITY_DETECT_INIT:
                /* Advertise full duplex + symmetric pause. */
                ap->flags &= ~(MR_TOGGLE_TX);
                ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
                tw32(MAC_TX_AUTO_NEG, ap->txconfig);
                tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ap->state = ANEG_STATE_ABILITY_DETECT;
                break;

        case ANEG_STATE_ABILITY_DETECT:
                if (ap->ability_match != 0 && ap->rxconfig != 0) {
                        ap->state = ANEG_STATE_ACK_DETECT_INIT;
                }
                break;

        case ANEG_STATE_ACK_DETECT_INIT:
                /* Echo the partner's config back with ACK set. */
                ap->txconfig |= ANEG_CFG_ACK;
                tw32(MAC_TX_AUTO_NEG, ap->txconfig);
                tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ap->state = ANEG_STATE_ACK_DETECT;

                /* fallthru */
        case ANEG_STATE_ACK_DETECT:
                if (ap->ack_match != 0) {
                        if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
                            (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
                                ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
                        } else {
                                ap->state = ANEG_STATE_AN_ENABLE;
                        }
                } else if (ap->ability_match != 0 &&
                           ap->rxconfig == 0) {
                        ap->state = ANEG_STATE_AN_ENABLE;
                }
                break;

        case ANEG_STATE_COMPLETE_ACK_INIT:
                if (ap->rxconfig & ANEG_CFG_INVAL) {
                        ret = ANEG_FAILED;
                        break;
                }
                /* Decode the link partner's abilities into MR_* flags. */
                ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
                               MR_LP_ADV_HALF_DUPLEX |
                               MR_LP_ADV_SYM_PAUSE |
                               MR_LP_ADV_ASYM_PAUSE |
                               MR_LP_ADV_REMOTE_FAULT1 |
                               MR_LP_ADV_REMOTE_FAULT2 |
                               MR_LP_ADV_NEXT_PAGE |
                               MR_TOGGLE_RX |
                               MR_NP_RX);
                if (ap->rxconfig & ANEG_CFG_FD)
                        ap->flags |= MR_LP_ADV_FULL_DUPLEX;
                if (ap->rxconfig & ANEG_CFG_HD)
                        ap->flags |= MR_LP_ADV_HALF_DUPLEX;
                if (ap->rxconfig & ANEG_CFG_PS1)
                        ap->flags |= MR_LP_ADV_SYM_PAUSE;
                if (ap->rxconfig & ANEG_CFG_PS2)
                        ap->flags |= MR_LP_ADV_ASYM_PAUSE;
                if (ap->rxconfig & ANEG_CFG_RF1)
                        ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
                if (ap->rxconfig & ANEG_CFG_RF2)
                        ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
                if (ap->rxconfig & ANEG_CFG_NP)
                        ap->flags |= MR_LP_ADV_NEXT_PAGE;

                ap->link_time = ap->cur_time;

                ap->flags ^= (MR_TOGGLE_TX);
                if (ap->rxconfig & 0x0008)
                        ap->flags |= MR_TOGGLE_RX;
                if (ap->rxconfig & ANEG_CFG_NP)
                        ap->flags |= MR_NP_RX;
                ap->flags |= MR_PAGE_RX;

                ap->state = ANEG_STATE_COMPLETE_ACK;
                ret = ANEG_TIMER_ENAB;
                break;

        case ANEG_STATE_COMPLETE_ACK:
                if (ap->ability_match != 0 &&
                    ap->rxconfig == 0) {
                        ap->state = ANEG_STATE_AN_ENABLE;
                        break;
                }
                delta = ap->cur_time - ap->link_time;
                if (delta > ANEG_STATE_SETTLE_TIME) {
                        if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
                                ap->state = ANEG_STATE_IDLE_DETECT_INIT;
                        } else {
                                /* Next-page exchange is unimplemented;
                                 * fail unless neither side wants it.
                                 */
                                if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
                                    !(ap->flags & MR_NP_RX)) {
                                        ap->state = ANEG_STATE_IDLE_DETECT_INIT;
                                } else {
                                        ret = ANEG_FAILED;
                                }
                        }
                }
                break;

        case ANEG_STATE_IDLE_DETECT_INIT:
                /* Stop sending config words; watch for idle. */
                ap->link_time = ap->cur_time;
                tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ap->state = ANEG_STATE_IDLE_DETECT;
                ret = ANEG_TIMER_ENAB;
                break;

        case ANEG_STATE_IDLE_DETECT:
                if (ap->ability_match != 0 &&
                    ap->rxconfig == 0) {
                        ap->state = ANEG_STATE_AN_ENABLE;
                        break;
                }
                delta = ap->cur_time - ap->link_time;
                if (delta > ANEG_STATE_SETTLE_TIME) {
                        /* XXX another gem from the Broadcom driver :( */
                        ap->state = ANEG_STATE_LINK_OK;
                }
                break;

        case ANEG_STATE_LINK_OK:
                ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
                ret = ANEG_DONE;
                break;

        case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
                /* ??? unimplemented */
                break;

        case ANEG_STATE_NEXT_PAGE_WAIT:
                /* ??? unimplemented */
                break;

        default:
                ret = ANEG_FAILED;
                break;
        };

        return ret;
}
2258
2259 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2260 {
2261         int res = 0;
2262         struct tg3_fiber_aneginfo aninfo;
2263         int status = ANEG_FAILED;
2264         unsigned int tick;
2265         u32 tmp;
2266
2267         tw32_f(MAC_TX_AUTO_NEG, 0);
2268
2269         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2270         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2271         udelay(40);
2272
2273         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2274         udelay(40);
2275
2276         memset(&aninfo, 0, sizeof(aninfo));
2277         aninfo.flags |= MR_AN_ENABLE;
2278         aninfo.state = ANEG_STATE_UNKNOWN;
2279         aninfo.cur_time = 0;
2280         tick = 0;
2281         while (++tick < 195000) {
2282                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2283                 if (status == ANEG_DONE || status == ANEG_FAILED)
2284                         break;
2285
2286                 udelay(1);
2287         }
2288
2289         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2290         tw32_f(MAC_MODE, tp->mac_mode);
2291         udelay(40);
2292
2293         *flags = aninfo.flags;
2294
2295         if (status == ANEG_DONE &&
2296             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2297                              MR_LP_ADV_FULL_DUPLEX)))
2298                 res = 1;
2299
2300         return res;
2301 }
2302
/* Hardware init sequence for the BCM8002 SerDes PHY.  The register
 * addresses and values below are vendor-specified magic; the write
 * order must be preserved exactly.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
        u32 mac_status = tr32(MAC_STATUS);
        int i;

        /* Reset when initting first time or we have a link.
         * (Skip only when already initialized and PCS has no sync.)
         */
        if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
            !(mac_status & MAC_STATUS_PCS_SYNCED))
                return;

        /* Set PLL lock range. */
        tg3_writephy(tp, 0x16, 0x8007);

        /* SW reset */
        tg3_writephy(tp, MII_BMCR, BMCR_RESET);

        /* Wait for reset to complete. */
        /* XXX schedule_timeout() ... */
        for (i = 0; i < 500; i++)      /* ~5ms busy-wait */
                udelay(10);

        /* Config mode; select PMA/Ch 1 regs. */
        tg3_writephy(tp, 0x10, 0x8411);

        /* Enable auto-lock and comdet, select txclk for tx. */
        tg3_writephy(tp, 0x11, 0x0a10);

        tg3_writephy(tp, 0x18, 0x00a0);
        tg3_writephy(tp, 0x16, 0x41ff);

        /* Assert and deassert POR. */
        tg3_writephy(tp, 0x13, 0x0400);
        udelay(40);
        tg3_writephy(tp, 0x13, 0x0000);

        tg3_writephy(tp, 0x11, 0x0a50);
        udelay(40);
        tg3_writephy(tp, 0x11, 0x0a10);

        /* Wait for signal to stabilize */
        /* XXX schedule_timeout() ... */
        for (i = 0; i < 15000; i++)    /* ~150ms busy-wait */
                udelay(10);

        /* Deselect the channel register so we can read the PHYID
         * later.
         */
        tg3_writephy(tp, 0x10, 0x8011);
}
2352
/* Drive the on-chip SG_DIG hardware autonegotiation engine for fiber
 * links (5704S-class parts).  Handles three cases: forced mode (autoneg
 * disabled), (re)starting hardware autoneg when the SG_DIG_CTRL register
 * does not yet hold the expected configuration, and harvesting the
 * result once autoneg completes - including a parallel-detection
 * fallback for partners that never send config code words.
 *
 * A serdes config workaround is applied on all chips except 5704 A0/A1;
 * the workaround values differ per MAC port (port_a).
 *
 * @tp:         device private state
 * @mac_status: cached MAC_STATUS value (re-read internally as needed)
 *
 * Returns 1 if the link is up, 0 otherwise.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		/* Forced mode: if hw autoneg is currently enabled
		 * (bit 31), turn it off and restore forced settings.
		 */
		if (sg_dig_ctrl & (1 << 31)) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}
			tw32_f(SG_DIG_CTRL, 0x01388400);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation.  */
	expected_sg_dig_ctrl = 0x81388400;

	/* Pause capability */
	expected_sg_dig_ctrl |= (1 << 11);

	/* Asymmetric pause */
	expected_sg_dig_ctrl |= (1 << 12);

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* If we came up via parallel detect and still see PCS
		 * sync without config code words, keep the link up
		 * while the serdes_counter grace period runs down.
		 */
		if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = 1;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		/* Pulse the autoneg-start bit (30), then write the
		 * steady-state control value.
		 */
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		/* SG_DIG_STATUS bit 1 == autoneg complete. */
		if ((sg_dig_status & (1 << 1)) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			u32 local_adv, remote_adv;

			local_adv = ADVERTISE_PAUSE_CAP;
			remote_adv = 0;
			/* Bits 19/20: partner's pause advertisement. */
			if (sg_dig_status & (1 << 19))
				remote_adv |= LPA_PAUSE_CAP;
			if (sg_dig_status & (1 << 20))
				remote_adv |= LPA_PAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->serdes_counter = 0;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		} else if (!(sg_dig_status & (1 << 1))) {
			/* Autoneg not complete: burn down the timeout,
			 * then try parallel detection.
			 */
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, 0x01388400);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
					tp->tg3_flags2 |=
						TG3_FLG2_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		/* No sync and no signal: rearm the autoneg timeout. */
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
2489
/* Fiber link setup without the SG_DIG hardware autoneg engine: either
 * run the software autoneg state machine (fiber_autoneg) or force a
 * 1000FD link, then derive flow control and link state.
 *
 * @tp:         device private state
 * @mac_status: cached MAC_STATUS value (re-read internally)
 *
 * Returns 1 if the link is up, 0 otherwise.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	/* No PCS sync means no usable link at all. */
	if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
		tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
		goto out;
	}

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 flags;
		int i;

		if (fiber_autoneg(tp, &flags)) {
			u32 local_adv, remote_adv;

			/* Translate the aneg result flags into MII-style
			 * pause advertisements for flow control setup.
			 */
			local_adv = ADVERTISE_PAUSE_CAP;
			remote_adv = 0;
			if (flags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_PAUSE_CAP;
			if (flags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_PAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
			current_link_up = 1;
		}
		/* Wait (bounded) for the sync/config change latches to
		 * stay clear after acking them.
		 */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		/* Autoneg failed but we have sync and no config code
		 * words: treat as link up via parallel detection.
		 */
		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		/* Forcing 1000FD link up. */
		current_link_up = 1;
		tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);
	}

out:
	return current_link_up;
}
2547
/* Top-level link setup for TBI/fiber ports.  Chooses between hardware
 * (SG_DIG) and software autoneg, updates MAC mode/LED state to match
 * the result, and reports carrier transitions to the network stack.
 *
 * @tp:          device private state
 * @force_reset: unused on this path (kept for signature parity with
 *               the copper/MII setup routines)
 *
 * Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Snapshot current settings so we only re-report the link when
	 * something actually changed.
	 */
	orig_pause_cfg =
		(tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
				  TG3_FLAG_TX_PAUSE));
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: software-autoneg link already up and stable;
	 * just ack the change latches and bail.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Fiber always runs in TBI full-duplex port mode. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* Clear the pending link-change bit in the status block. */
	tp->hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack the MAC change latches until they stay clear (bounded). */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		/* Autoneg timed out: pulse SEND_CONFIGS to try to wake
		 * up the link partner.
		 */
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	/* Fiber is always 1000FD when up; drive the LEDs to match. */
	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		/* Carrier unchanged; still report if pause/speed/duplex
		 * settings moved under us.
		 */
		u32 now_pause_cfg =
			tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
					 TG3_FLAG_TX_PAUSE);
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
2663
/* Link setup for fiber ports driven through an MII-style SerDes PHY
 * (5714S-class parts).  Programs the PHY for autoneg or forced mode,
 * reads back link status, resolves duplex/flow control from the
 * advertisement registers, and reports carrier transitions.
 *
 * @tp:          device private state
 * @force_reset: non-zero to hard-reset the PHY before configuring
 *
 * Returns the OR of all tg3_readphy() error results (0 on success).
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Ack all pending MAC status change latches. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	/* BMSR link status is latched-low; read twice for current state. */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		/* 5714: the PHY's BMSR link bit is unreliable; trust
		 * the MAC's TX status link indication instead.
		 */
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, new_adv;

		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				  ADVERTISE_1000XPAUSE |
				  ADVERTISE_1000XPSE_ASYM |
				  ADVERTISE_SLCT);

		/* Always advertise symmetric PAUSE just like copper */
		new_adv |= ADVERTISE_1000XPAUSE;

		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
			new_adv |= ADVERTISE_1000XHALF;
		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
			new_adv |= ADVERTISE_1000XFULL;

		/* (Re)start autoneg only if the advertisement changed
		 * or autoneg is not currently enabled in the PHY.
		 */
		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				/* Withdraw all speed advertisements and
				 * restart autoneg so the partner drops
				 * the link before we force our mode.
				 */
				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Re-read latched-low link status (twice). */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		if (bmcr & BMCR_ANENABLE) {
			u32 local_adv, remote_adv, common;

			/* Resolve duplex/pause from the intersection of
			 * our and the partner's advertisements.
			 */
			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;

				tg3_setup_flow_control(tp, local_adv,
						       remote_adv);
			}
			else
				current_link_up = 0;
		}
	}

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
2830
/* Periodic SerDes parallel-detection poll (called from the driver
 * timer).  When autoneg has timed out with no link, checks PHY
 * shadow/expansion registers for a partner that does not autoneg and
 * forces 1000FD if signal is present without config code words.
 * Conversely, if a parallel-detected link later starts receiving
 * config code words, autoneg is turned back on.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}
	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, 0x1c, 0x7c00);
			tg3_readphy(tp, 0x1c, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, 0x17, 0x0f01);
			/* Read twice to clear the latched value. */
			tg3_readphy(tp, 0x15, &phy2);
			tg3_readphy(tp, 0x15, &phy2);

			/* phy1 bit 4: signal detect; phy2 bit 5:
			 * receiving config code words.
			 */
			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
			}
		}
	}
	else if (netif_carrier_ok(tp->dev) &&
		 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, 0x17, 0x0f01);
		tg3_readphy(tp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

		}
	}
}
2888
2889 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2890 {
2891         int err;
2892
2893         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2894                 err = tg3_setup_fiber_phy(tp, force_reset);
2895         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2896                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
2897         } else {
2898                 err = tg3_setup_copper_phy(tp, force_reset);
2899         }
2900
2901         if (tp->link_config.active_speed == SPEED_1000 &&
2902             tp->link_config.active_duplex == DUPLEX_HALF)
2903                 tw32(MAC_TX_LENGTHS,
2904                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2905                       (6 << TX_LENGTHS_IPG_SHIFT) |
2906                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2907         else
2908                 tw32(MAC_TX_LENGTHS,
2909                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2910                       (6 << TX_LENGTHS_IPG_SHIFT) |
2911                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2912
2913         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2914                 if (netif_carrier_ok(tp->dev)) {
2915                         tw32(HOSTCC_STAT_COAL_TICKS,
2916                              tp->coal.stats_block_coalesce_usecs);
2917                 } else {
2918                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2919                 }
2920         }
2921
2922         return err;
2923 }
2924
2925 /* This is called whenever we suspect that the system chipset is re-
2926  * ordering the sequence of MMIO to the tx send mailbox. The symptom
2927  * is bogus tx completions. We try to recover by setting the
2928  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
2929  * in the workqueue.
2930  */
/* Flag a suspected MMIO write-reordering problem (bogus tx completion
 * seen in tg3_tx()) so the reset task can recover; see the comment
 * block above for the full story.  Only warns once per reset cycle via
 * TG3_FLAG_TX_RECOVERY_PENDING.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* Should never trip if the reorder workaround is already active
	 * or mailbox writes already go through the indirect path.
	 */
	BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
	       "mapped I/O cycles to the network device, attempting to "
	       "recover. Please report the problem to the driver maintainer "
	       "and include system chipset information.\n", tp->dev->name);

	spin_lock(&tp->lock);
	tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
	spin_unlock(&tp->lock);
}
2945
2946 static inline u32 tg3_tx_avail(struct tg3 *tp)
2947 {
2948         smp_mb();
2949         return (tp->tx_pending -
2950                 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
2951 }
2952
2953 /* Tigon3 never reports partial packet sends.  So we do not
2954  * need special logic to handle SKBs that have not had all
2955  * of their frags sent yet, like SunGEM does.
2956  */
/* Reclaim completed TX descriptors: walk the ring from our software
 * consumer index up to the hardware's consumer index, unmapping DMA
 * and freeing each completed skb, then wake the queue if it was
 * stopped and enough space has opened up.  An unexpected NULL skb or
 * frag slot triggers the MMIO-reorder recovery path.
 */
static void tg3_tx(struct tg3 *tp)
{
	u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tp->tx_cons;

	while (sw_idx != hw_idx) {
		struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A completed slot with no skb means the hardware saw a
		 * mailbox write we never made coherently - recover.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		/* First slot maps the skb's linear data... */
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		/* ...followed by one slot per paged fragment. */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tp->tx_buffers[sw_idx];
			/* Frag slots must be empty and must not run past
			 * the hardware index; otherwise the completion
			 * is bogus.
			 */
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(ri, mapping),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);

			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tp->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Re-check under the tx lock to close the race with a
	 * concurrent tg3_start_xmit() stopping the queue.
	 */
	if (unlikely(netif_queue_stopped(tp->dev) &&
		     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH))) {
		netif_tx_lock(tp->dev);
		if (netif_queue_stopped(tp->dev) &&
		    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH))
			netif_wake_queue(tp->dev);
		netif_tx_unlock(tp->dev);
	}
}
3020
3021 /* Returns size of skb allocated or < 0 on error.
3022  *
3023  * We only need to fill in the address because the other members
3024  * of the RX descriptor are invariant, see tg3_init_rings.
3025  *
3026  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
3027  * posting buffers we only dirty the first cache line of the RX
3028  * descriptor (containing the address).  Whereas for the RX status
3029  * buffers the cpu only reads the last cacheline of the RX descriptor
3030  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3031  */
3032 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3033                             int src_idx, u32 dest_idx_unmasked)
3034 {
3035         struct tg3_rx_buffer_desc *desc;
3036         struct ring_info *map, *src_map;
3037         struct sk_buff *skb;
3038         dma_addr_t mapping;
3039         int skb_size, dest_idx;
3040
3041         src_map = NULL;
3042         switch (opaque_key) {
3043         case RXD_OPAQUE_RING_STD:
3044                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3045                 desc = &tp->rx_std[dest_idx];
3046                 map = &tp->rx_std_buffers[dest_idx];
3047                 if (src_idx >= 0)
3048                         src_map = &tp->rx_std_buffers[src_idx];
3049                 skb_size = tp->rx_pkt_buf_sz;
3050                 break;
3051
3052         case RXD_OPAQUE_RING_JUMBO:
3053                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3054                 desc = &tp->rx_jumbo[dest_idx];
3055                 map = &tp->rx_jumbo_buffers[dest_idx];
3056                 if (src_idx >= 0)
3057                         src_map = &tp->rx_jumbo_buffers[src_idx];
3058                 skb_size = RX_JUMBO_PKT_BUF_SZ;
3059                 break;
3060
3061         default:
3062                 return -EINVAL;
3063         };
3064
3065         /* Do not overwrite any of the map or rp information
3066          * until we are sure we can commit to a new buffer.
3067          *
3068          * Callers depend upon this behavior and assume that
3069          * we leave everything unchanged if we fail.
3070          */
3071         skb = netdev_alloc_skb(tp->dev, skb_size);
3072         if (skb == NULL)
3073                 return -ENOMEM;
3074
3075         skb_reserve(skb, tp->rx_offset);
3076
3077         mapping = pci_map_single(tp->pdev, skb->data,
3078                                  skb_size - tp->rx_offset,
3079                                  PCI_DMA_FROMDEVICE);
3080
3081         map->skb = skb;
3082         pci_unmap_addr_set(map, mapping, mapping);
3083
3084         if (src_map != NULL)
3085                 src_map->skb = NULL;
3086
3087         desc->addr_hi = ((u64)mapping >> 32);
3088         desc->addr_lo = ((u64)mapping & 0xffffffff);
3089
3090         return skb_size;
3091 }
3092
/* We only need to move the address over because the other
3094  * members of the RX descriptor are invariant.  See notes above
3095  * tg3_alloc_rx_skb for full details.
3096  */
3097 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3098                            int src_idx, u32 dest_idx_unmasked)
3099 {
3100         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3101         struct ring_info *src_map, *dest_map;
3102         int dest_idx;
3103
3104         switch (opaque_key) {
3105         case RXD_OPAQUE_RING_STD:
3106                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3107                 dest_desc = &tp->rx_std[dest_idx];
3108                 dest_map = &tp->rx_std_buffers[dest_idx];
3109                 src_desc = &tp->rx_std[src_idx];
3110                 src_map = &tp->rx_std_buffers[src_idx];
3111                 break;
3112
3113         case RXD_OPAQUE_RING_JUMBO:
3114                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3115                 dest_desc = &tp->rx_jumbo[dest_idx];
3116                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3117                 src_desc = &tp->rx_jumbo[src_idx];
3118                 src_map = &tp->rx_jumbo_buffers[src_idx];
3119                 break;
3120
3121         default:
3122                 return;
3123         };
3124
3125         dest_map->skb = src_map->skb;
3126         pci_unmap_addr_set(dest_map, mapping,
3127                            pci_unmap_addr(src_map, mapping));
3128         dest_desc->addr_hi = src_desc->addr_hi;
3129         dest_desc->addr_lo = src_desc->addr_lo;
3130
3131         src_map->skb = NULL;
3132 }
3133
#if TG3_VLAN_TAG_USED
/* Hand a received skb to the stack along with its hardware-extracted
 * VLAN tag.  Thin wrapper so the tg3_rx() call site stays readable
 * inside its own #if TG3_VLAN_TAG_USED block.
 */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
	return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
#endif
3140
3141 /* The RX ring scheme is composed of multiple rings which post fresh
3142  * buffers to the chip, and one special ring the chip uses to report
3143  * status back to the host.
3144  *
3145  * The special ring reports the status of received packets to the
3146  * host.  The chip does not write into the original descriptor the
3147  * RX buffer was obtained from.  The chip simply takes the original
3148  * descriptor as provided by the host, updates the status and length
3149  * field, then writes this into the next status ring entry.
3150  *
3151  * Each ring the host uses to post buffers to the chip is described
3152  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
3153  * it is first placed into the on-chip ram.  When the packet's length
3154  * is known, it walks down the TG3_BDINFO entries to select the ring.
3155  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
3156  * which is within the range of the new packet's length is chosen.
3157  *
3158  * The "separate ring for rx status" scheme may sound queer, but it makes
3159  * sense from a cache coherency perspective.  If only the host writes
3160  * to the buffer post rings, and only the chip writes to the rx status
3161  * rings, then cache lines never move beyond shared-modified state.
3162  * If both the host and chip were to write into the same ring, cache line
3163  * eviction could occur since both entities want it in an exclusive state.
3164  */
/* Service the RX return ring: hand up to @budget completed packets to
 * the stack and repost buffers to the chip.  Returns the number of
 * packets received.  Called only from tg3_poll(), so NAPI provides the
 * serialization (see the scheme description in the comment above).
 */
static int tg3_rx(struct tg3 *tp, int budget)
{
	u32 work_mask, rx_std_posted = 0;
	u32 sw_idx = tp->rx_rcb_ptr;
	u16 hw_idx;
	int received;

	hw_idx = tp->hw_status->idx[0].rx_producer;
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	while (sw_idx != hw_idx && budget > 0) {
		struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		/* The opaque cookie identifies which producer ring the
		 * buffer came from and its slot index there.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
						  mapping);
			skb = tp->rx_std_buffers[desc_idx].skb;
			post_ptr = &tp->rx_std_ptr;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
						  mapping);
			skb = tp->rx_jumbo_buffers[desc_idx].skb;
			post_ptr = &tp->rx_jumbo_ptr;
		}
		else {
			goto next_pkt_nopost;
		}

		work_mask |= opaque_key;

		/* Drop errored frames, except the tolerated odd-nibble
		 * MII error.  The buffer is recycled back to the chip.
		 */
		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->net_stats.rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */

		/* Large frame: flip the full buffer up to the stack and
		 * post a fresh one.  Small frame: copy into a new skb
		 * and recycle the original buffer.
		 */
		if (len > RX_COPY_THRESHOLD
			&& tp->rx_offset == 2
			/* rx_offset != 2 iff this is a 5701 card running
			 * in PCI-X mode [see tg3_get_invariants()] */
		) {
			int skb_size;

			skb_size = tg3_alloc_rx_skb(tp, opaque_key,
						    desc_idx, *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr,
					 skb_size - tp->rx_offset,
					 PCI_DMA_FROMDEVICE);

			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;

			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = netdev_alloc_skb(tp->dev, len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			/* +2 reserve keeps the IP header aligned. */
			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			memcpy(copy_skb->data, skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		/* Trust the hardware checksum only when the chip saw a
		 * complete TCP/UDP checksum of 0xffff.
		 */
		if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb->protocol = eth_type_trans(skb, tp->dev);
#if TG3_VLAN_TAG_USED
		if (tp->vlgrp != NULL &&
		    desc->type_flags & RXD_FLAG_VLAN) {
			tg3_vlan_rx(tp, skb,
				    desc->err_vlan & RXD_VLAN_MASK);
		} else
#endif
			netif_receive_skb(skb);

		tp->dev->last_rx = jiffies;
		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Post std-ring buffers early if a burst would otherwise
		 * outrun the chip's producer index.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			u32 idx = *post_ptr % TG3_RX_RING_SIZE;

			tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
				     TG3_64BIT_REG_LOW, idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx %= TG3_RX_RCB_RING_SIZE(tp);

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = tp->hw_status->idx[0].rx_producer;
			rmb();
		}
	}

	/* ACK the status ring. */
	tp->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);

	/* Refill RX ring(s). */
	if (work_mask & RXD_OPAQUE_RING_STD) {
		sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	if (work_mask & RXD_OPAQUE_RING_JUMBO) {
		sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	/* Order mailbox writes before any later MMIO on other CPUs. */
	mmiowb();

	return received;
}
3320
/* NAPI poll handler: services link-change events, TX completions, and
 * the RX return ring, bounded by the NAPI budget/quota.  Returns 0 when
 * all work is done (poll removed from the list and chip interrupts
 * restarted), 1 when more work remains.
 */
static int tg3_poll(struct net_device *netdev, int *budget)
{
	struct tg3 *tp = netdev_priv(netdev);
	struct tg3_hw_status *sblk = tp->hw_status;
	int done;

	/* handle link change and other phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Clear the link-change bit while keeping
			 * SD_STATUS_UPDATED set.
			 */
			sblk->status = SD_STATUS_UPDATED |
				(sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}

	/* run TX completion thread */
	if (sblk->idx[0].tx_consumer != tp->tx_cons) {
		tg3_tx(tp);
		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) {
			/* TX path needs a full chip reset; stop polling
			 * and defer to the reset workqueue.
			 */
			netif_rx_complete(netdev);
			schedule_work(&tp->reset_task);
			return 0;
		}
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with dev->poll()
	 */
	if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
		int orig_budget = *budget;
		int work_done;

		if (orig_budget > netdev->quota)
			orig_budget = netdev->quota;

		work_done = tg3_rx(tp, orig_budget);

		*budget -= work_done;
		netdev->quota -= work_done;
	}

	/* Record the status tag (tagged mode) or clear the updated bit
	 * so a new status block write is detectable.
	 */
	if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
		tp->last_tag = sblk->status_tag;
		rmb();
	} else
		sblk->status &= ~SD_STATUS_UPDATED;

	/* if no more work, tell net stack and NIC we're done */
	done = !tg3_has_work(tp);
	if (done) {
		netif_rx_complete(netdev);
		tg3_restart_ints(tp);
	}

	return (done ? 0 : 1);
}
3382
/* Mark the device as IRQ-quiesced and wait for any in-flight interrupt
 * handler to finish.  The handlers check irq_sync (tg3_irq_sync) and
 * refuse to schedule NAPI once it is set; the smp_mb() orders the store
 * before the synchronize_irq() wait.  Must not be called re-entrantly.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

	synchronize_irq(tp->pdev->irq);
}
3392
/* Non-zero while the device is quiesced (see tg3_irq_quiesce); IRQ
 * handlers use this to avoid scheduling NAPI during a shutdown/reset.
 */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
3397
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 *
 * Note the ordering: the IRQ quiesce happens before taking tp->lock,
 * since synchronize_irq() may sleep-wait on a running handler.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	if (irq_sync)
		tg3_irq_quiesce(tp);
	spin_lock_bh(&tp->lock);
}
3409
/* Release the lock taken by tg3_full_lock().  Does not clear irq_sync;
 * that is done explicitly by the paths that re-enable the device.
 */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
3414
3415 /* One-shot MSI handler - Chip automatically disables interrupt
3416  * after sending MSI so driver doesn't have to do it.
3417  */
3418 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id, struct pt_regs *regs)
3419 {
3420         struct net_device *dev = dev_id;
3421         struct tg3 *tp = netdev_priv(dev);
3422
3423         prefetch(tp->hw_status);
3424         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3425
3426         if (likely(!tg3_irq_sync(tp)))
3427                 netif_rx_schedule(dev);         /* schedule NAPI poll */
3428
3429         return IRQ_HANDLED;
3430 }
3431
3432 /* MSI ISR - No need to check for interrupt sharing and no need to
3433  * flush status block and interrupt mailbox. PCI ordering rules
3434  * guarantee that MSI will arrive after the status block.
3435  */
3436 static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
3437 {
3438         struct net_device *dev = dev_id;
3439         struct tg3 *tp = netdev_priv(dev);
3440
3441         prefetch(tp->hw_status);
3442         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3443         /*
3444          * Writing any value to intr-mbox-0 clears PCI INTA# and
3445          * chip-internal interrupt pending events.
3446          * Writing non-zero to intr-mbox-0 additional tells the
3447          * NIC to stop sending us irqs, engaging "in-intr-handler"
3448          * event coalescing.
3449          */
3450         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3451         if (likely(!tg3_irq_sync(tp)))
3452                 netif_rx_schedule(dev);         /* schedule NAPI poll */
3453
3454         return IRQ_RETVAL(1);
3455 }
3456
/* Shared INTx interrupt handler (non-tagged status mode).  Claims the
 * interrupt when the status block is marked updated or the PCI state
 * register shows INTA# asserted, acks it via the interrupt mailbox,
 * and schedules NAPI if there is work.  Returns IRQ_NONE for a shared
 * interrupt that is not ours.
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		/*
		 * Writing any value to intr-mbox-0 clears PCI INTA# and
		 * chip-internal interrupt pending events.
		 * Writing non-zero to intr-mbox-0 additional tells the
		 * NIC to stop sending us irqs, engaging "in-intr-handler"
		 * event coalescing.
		 */
		tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			     0x00000001);
		/* Device quiesced: still claim the irq (already acked),
		 * but do not schedule NAPI.
		 */
		if (tg3_irq_sync(tp))
			goto out;
		sblk->status &= ~SD_STATUS_UPDATED;
		if (likely(tg3_has_work(tp))) {
			prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
			netif_rx_schedule(dev);		/* schedule NAPI poll */
		} else {
			/* No work, shared interrupt perhaps?  re-enable
			 * interrupts, and flush that PCI write
			 */
			tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
				0x00000000);
		}
	} else {	/* shared interrupt */
		handled = 0;
	}
out:
	return IRQ_RETVAL(handled);
}
3499
/* Shared INTx interrupt handler for tagged-status mode.  Same ack
 * sequence as tg3_interrupt(), but new work is detected by comparing
 * the status tag against the last one seen instead of a status bit.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if ((sblk->status_tag != tp->last_tag) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		/*
		 * writing any value to intr-mbox-0 clears PCI INTA# and
		 * chip-internal interrupt pending events.
		 * writing non-zero to intr-mbox-0 additional tells the
		 * NIC to stop sending us irqs, engaging "in-intr-handler"
		 * event coalescing.
		 */
		tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			     0x00000001);
		if (tg3_irq_sync(tp))
			goto out;
		if (netif_rx_schedule_prep(dev)) {
			prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
			/* Update last_tag to mark that this status has been
			 * seen. Because interrupt may be shared, we may be
			 * racing with tg3_poll(), so only update last_tag
			 * if tg3_poll() is not scheduled.
			 */
			tp->last_tag = sblk->status_tag;
			__netif_rx_schedule(dev);
		}
	} else {	/* shared interrupt */
		handled = 0;
	}
out:
	return IRQ_RETVAL(handled);
}
3541
3542 /* ISR for interrupt test */
3543 static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3544                 struct pt_regs *regs)
3545 {
3546         struct net_device *dev = dev_id;
3547         struct tg3 *tp = netdev_priv(dev);
3548         struct tg3_hw_status *sblk = tp->hw_status;
3549
3550         if ((sblk->status & SD_STATUS_UPDATED) ||
3551             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3552                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3553                              0x00000001);
3554                 return IRQ_RETVAL(1);
3555         }
3556         return IRQ_RETVAL(0);
3557 }
3558
3559 static int tg3_init_hw(struct tg3 *, int);
3560 static int tg3_halt(struct tg3 *, int, int);
3561
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.  Returns 0 on success or the negative
 * error from tg3_init_hw(); on failure the device is halted and closed,
 * and the lock is dropped and re-taken around dev_close().
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
		       "aborting.\n", tp->dev->name);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		/* dev_close() takes its own locks and may sleep, so the
		 * full lock must be dropped around it; the timer and
		 * irq_sync state are cleaned up first.
		 */
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		tp->irq_sync = 0;
		netif_poll_enable(tp->dev);
		dev_close(tp->dev);
		/* Re-take the lock for the caller, who still holds it. */
		tg3_full_lock(tp, 0);
	}
	return err;
}
3583
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: drive the normal INTx handler by hand so netconsole
 * and friends can receive/transmit with interrupts disabled.
 */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_interrupt(tp->pdev->irq, dev, NULL);
}
#endif
3592
/* Workqueue handler that fully resets the chip (scheduled from the TX
 * timeout and error-recovery paths).  Runs in process context; @_data
 * is the struct tg3.  Serializes against the rest of the driver with
 * tg3_full_lock()/tg3_irq_quiesce().
 */
static void tg3_reset_task(void *_data)
{
	struct tg3 *tp = _data;
	unsigned int restart_timer;

	tg3_full_lock(tp, 0);
	tp->tg3_flags |= TG3_FLAG_IN_RESET_TASK;

	/* Device was closed after the reset was scheduled; nothing to do. */
	if (!netif_running(tp->dev)) {
		tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	/* tg3_netif_stop() may sleep, so it runs unlocked; the second
	 * lock acquisition also quiesces the IRQ handler (irq_sync=1).
	 */
	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

	/* A TX-recovery reset switches to flushing mailbox writes to
	 * work around write-reordering on the bus.
	 */
	if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
		tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	if (tg3_init_hw(tp, 1))
		goto out;

	tg3_netif_start(tp);

	if (restart_timer)
		mod_timer(&tp->timer, jiffies + 1);

out:
	tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;

	tg3_full_unlock(tp);
}
3637
3638 static void tg3_tx_timeout(struct net_device *dev)
3639 {
3640         struct tg3 *tp = netdev_priv(dev);
3641
3642         printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3643                dev->name);
3644
3645         schedule_work(&tp->reset_task);
3646 }
3647
3648 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3649 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3650 {
3651         u32 base = (u32) mapping & 0xffffffff;
3652
3653         return ((base > 0xffffdcc0) &&
3654                 (base + len + 8 < base));
3655 }
3656
3657 /* Test for DMA addresses > 40-bit */
3658 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
3659                                           int len)
3660 {
3661 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
3662         if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
3663                 return (((u64) mapping + len) > DMA_40BIT_MASK);
3664         return 0;
3665 #else
3666         return 0;
3667 #endif
3668 }
3669
3670 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3671
/* Workaround 4GB and 40-bit hardware DMA bugs.
 *
 * Linearizes the offending skb into a freshly allocated copy, maps the
 * copy into a single descriptor at *@start, then unmaps and clears the
 * sw ring entries [@*start, @last_plus_one) that held the original's
 * fragments.  On success *@start is advanced past the new descriptor.
 * The original skb is always freed.  Returns 0 on success, -1 when the
 * copy cannot be allocated or the copy itself crosses a 4G boundary
 * (the packet is then dropped).
 */
static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
				       u32 last_plus_one, u32 *start,
				       u32 base_flags, u32 mss)
{
	struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
	dma_addr_t new_addr = 0;
	u32 entry = *start;
	int i, ret = 0;

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		entry = *start;
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure new skb does not cross any 4G boundaries.
		 * Drop the packet if it does.
		 */
		if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
			ret = -1;
			dev_kfree_skb(new_skb);
			new_skb = NULL;
		} else {
			tg3_set_txd(tp, entry, new_addr, new_skb->len,
				    base_flags, 1 | (mss << 1));
			*start = NEXT_TX(entry);
		}
	}

	/* Now clean up the sw ring entries. */
	i = 0;
	while (entry != last_plus_one) {
		int len;

		/* Entry 0 held the linear head; subsequent entries held
		 * the page fragments of the original skb.
		 */
		if (i == 0)
			len = skb_headlen(skb);
		else
			len = skb_shinfo(skb)->frags[i-1].size;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
				 len, PCI_DMA_TODEVICE);
		/* The first slot now owns the copy (or nothing on failure). */
		if (i == 0) {
			tp->tx_buffers[entry].skb = new_skb;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
		} else {
			tp->tx_buffers[entry].skb = NULL;
		}
		entry = NEXT_TX(entry);
		i++;
	}

	dev_kfree_skb(skb);

	return ret;
}
3729
3730 static void tg3_set_txd(struct tg3 *tp, int entry,
3731                         dma_addr_t mapping, int len, u32 flags,
3732                         u32 mss_and_is_end)
3733 {
3734         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3735         int is_end = (mss_and_is_end & 0x1);
3736         u32 mss = (mss_and_is_end >> 1);
3737         u32 vlan_tag = 0;
3738
3739         if (is_end)
3740                 flags |= TXD_FLAG_END;
3741         if (flags & TXD_FLAG_VLAN) {
3742                 vlan_tag = flags >> 16;
3743                 flags &= 0xffff;
3744         }
3745         vlan_tag |= (mss << TXD_MSS_SHIFT);
3746
3747         txd->addr_hi = ((u64) mapping >> 32);
3748         txd->addr_lo = ((u64) mapping & 0xffffffff);
3749         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3750         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3751 }
3752
/* hard_start_xmit for devices that don't have any bugs and
 * support TG3_FLG2_HW_TSO_2 only.
 *
 * Maps the skb head and fragments, fills TX descriptors, and kicks the
 * chip via the producer-index mailbox.  Returns NETDEV_TX_OK, or
 * NETDEV_TX_BUSY when the ring lacks room (which should not happen
 * while the queue is awake -- logged as a bug if it does).
 */
static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	dma_addr_t mapping;
	u32 len, entry, base_flags, mss;

	len = skb_headlen(skb);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
#if TG3_TSO_SUPPORT != 0
	mss = 0;
	if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
	    (mss = skb_shinfo(skb)->gso_size) != 0) {
		int tcp_opt_len, ip_tcp_len;

		/* The headers below are modified in place, so the skb
		 * head must be private to us.
		 */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		/* Encode the header length into the upper mss bits as
		 * the HW_TSO_2 descriptor format requires.
		 */
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
		else {
			tcp_opt_len = ((skb->h.th->doff - 5) * 4);
			ip_tcp_len = (skb->nh.iph->ihl * 4) +
				     sizeof(struct tcphdr);

			/* The chip recomputes these; pre-seed tot_len
			 * with the per-segment payload size.
			 */
			skb->nh.iph->check = 0;
			skb->nh.iph->tot_len = htons(mss + ip_tcp_len +
						     tcp_opt_len);
			mss |= (ip_tcp_len + tcp_opt_len) << 9;
		}

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		skb->h.th->check = 0;

	}
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
#else
	mss = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
#endif
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	/* Queue skb data, a.k.a. the main skb fragment. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);

			/* Only the first slot owns the skb pointer. */
			tp->tx_buffers[entry].skb = NULL;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

			tg3_set_txd(tp, entry, mapping, len,
				    base_flags, (i == last) | (mss << 1));

			entry = NEXT_TX(entry);
		}
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
		netif_stop_queue(dev);
		/* Re-check after stopping: the TX completion path may
		 * have freed room in the meantime.
		 */
		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH)
			netif_wake_queue(tp->dev);
	}

out_unlock:
	mmiowb();

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
3878
3879 #if TG3_TSO_SUPPORT != 0
3880 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
3881
3882 /* Use GSO to workaround a rare TSO bug that may be triggered when the
3883  * TSO header is greater than 80 bytes.
3884  */
3885 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
3886 {
3887         struct sk_buff *segs, *nskb;
3888
3889         /* Estimate the number of fragments in the worst case */
3890         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
3891                 netif_stop_queue(tp->dev);
3892                 return NETDEV_TX_BUSY;
3893         }
3894
3895         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
3896         if (unlikely(IS_ERR(segs)))
3897                 goto tg3_tso_bug_end;
3898
3899         do {
3900                 nskb = segs;
3901                 segs = segs->next;
3902                 nskb->next = NULL;
3903                 tg3_start_xmit_dma_bug(nskb, tp->dev);
3904         } while (segs);
3905
3906 tg3_tso_bug_end:
3907         dev_kfree_skb(skb);
3908
3909         return NETDEV_TX_OK;
3910 }
3911 #endif
3912
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
 *
 * Queues one skb (linear head plus page fragments) onto the tx ring,
 * applying the TSO and checksum offload descriptor flags, and runs the
 * DMA-boundary workaround when any mapping would trip the hardware bug.
 * Returns NETDEV_TX_OK (skb consumed or dropped) or NETDEV_TX_BUSY when
 * the ring is unexpectedly full.
 */
static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        dma_addr_t mapping;
        u32 len, entry, base_flags, mss;
        int would_hit_hwbug;

        len = skb_headlen(skb);

        /* We are running in BH disabled context with netif_tx_lock
         * and TX reclaim runs via tp->poll inside of a software
         * interrupt.  Furthermore, IRQ processing runs lockless so we have
         * no IRQ context deadlocks to worry about either.  Rejoice!
         */
        if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
                if (!netif_queue_stopped(dev)) {
                        netif_stop_queue(dev);

                        /* This is a hard error, log it. */
                        printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
                               "queue awake!\n", dev->name);
                }
                return NETDEV_TX_BUSY;
        }

        entry = tp->tx_prod;
        base_flags = 0;
        if (skb->ip_summed == CHECKSUM_PARTIAL)
                base_flags |= TXD_FLAG_TCPUDP_CSUM;
#if TG3_TSO_SUPPORT != 0
        mss = 0;
        /* TSO path: gso_size non-zero means the stack wants the chip to
         * segment this skb.
         */
        if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
            (mss = skb_shinfo(skb)->gso_size) != 0) {
                int tcp_opt_len, ip_tcp_len, hdr_len;

                /* Headers are modified below; get a private copy first. */
                if (skb_header_cloned(skb) &&
                    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
                        dev_kfree_skb(skb);
                        goto out_unlock;
                }

                tcp_opt_len = ((skb->h.th->doff - 5) * 4);
                ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);

                /* Headers longer than 80 bytes trip a hardware TSO bug
                 * on affected chips; fall back to software GSO there.
                 */
                hdr_len = ip_tcp_len + tcp_opt_len;
                if (unlikely((ETH_HLEN + hdr_len) > 80) &&
                             (tp->tg3_flags2 & TG3_FLG2_HW_TSO_1_BUG))
                        return (tg3_tso_bug(tp, skb));

                base_flags |= (TXD_FLAG_CPU_PRE_DMA |
                               TXD_FLAG_CPU_POST_DMA);

                /* Prime IP header for per-segment rewriting by the chip. */
                skb->nh.iph->check = 0;
                skb->nh.iph->tot_len = htons(mss + hdr_len);
                if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
                        /* HW TSO computes the TCP checksum itself. */
                        skb->h.th->check = 0;
                        base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
                }
                else {
                        /* Firmware TSO needs the pseudo-header checksum
                         * seeded into the TCP header.
                         */
                        skb->h.th->check =
                                ~csum_tcpudp_magic(skb->nh.iph->saddr,
                                                   skb->nh.iph->daddr,
                                                   0, IPPROTO_TCP, 0);
                }

                /* Encode IP/TCP option lengths; field position differs
                 * between HW-TSO/5705 (in mss word) and older firmware
                 * TSO (in base_flags).
                 */
                if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
                    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
                        if (tcp_opt_len || skb->nh.iph->ihl > 5) {
                                int tsflags;

                                tsflags = ((skb->nh.iph->ihl - 5) +
                                           (tcp_opt_len >> 2));
                                mss |= (tsflags << 11);
                        }
                } else {
                        if (tcp_opt_len || skb->nh.iph->ihl > 5) {
                                int tsflags;

                                tsflags = ((skb->nh.iph->ihl - 5) +
                                           (tcp_opt_len >> 2));
                                base_flags |= tsflags << 12;
                        }
                }
        }
#else
        mss = 0;
#endif
#if TG3_VLAN_TAG_USED
        if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
                base_flags |= (TXD_FLAG_VLAN |
                               (vlan_tx_tag_get(skb) << 16));
#endif

        /* Queue skb data, a.k.a. the main skb fragment. */
        mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

        tp->tx_buffers[entry].skb = skb;
        pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

        would_hit_hwbug = 0;

        /* Flag mappings that cross a 4GB boundary (hardware DMA bug). */
        if (tg3_4g_overflow_test(mapping, len))
                would_hit_hwbug = 1;

        tg3_set_txd(tp, entry, mapping, len, base_flags,
                    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

        entry = NEXT_TX(entry);

        /* Now loop through additional data fragments, and queue them. */
        if (skb_shinfo(skb)->nr_frags > 0) {
                unsigned int i, last;

                last = skb_shinfo(skb)->nr_frags - 1;
                for (i = 0; i <= last; i++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                        len = frag->size;
                        mapping = pci_map_page(tp->pdev,
                                               frag->page,
                                               frag->page_offset,
                                               len, PCI_DMA_TODEVICE);

                        tp->tx_buffers[entry].skb = NULL;
                        pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

                        /* Same 4GB-crossing and >40-bit address checks
                         * for every fragment mapping.
                         */
                        if (tg3_4g_overflow_test(mapping, len))
                                would_hit_hwbug = 1;

                        if (tg3_40bit_overflow_test(tp, mapping, len))
                                would_hit_hwbug = 1;

                        if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
                                tg3_set_txd(tp, entry, mapping, len,
                                            base_flags, (i == last)|(mss << 1));
                        else
                                tg3_set_txd(tp, entry, mapping, len,
                                            base_flags, (i == last));

                        entry = NEXT_TX(entry);
                }
        }

        if (would_hit_hwbug) {
                u32 last_plus_one = entry;
                u32 start;

                /* Rewind to the first descriptor of this skb so the
                 * workaround can re-queue the whole packet from a
                 * bounce buffer.
                 */
                start = entry - 1 - skb_shinfo(skb)->nr_frags;
                start &= (TG3_TX_RING_SIZE - 1);

                /* If the workaround fails due to memory/mapping
                 * failure, silently drop this packet.
                 */
                if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
                                                &start, base_flags, mss))
                        goto out_unlock;

                entry = start;
        }

        /* Packets are ready, update Tx producer idx local and on card. */
        tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

        tp->tx_prod = entry;
        if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
                netif_stop_queue(dev);
                if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH)
                        netif_wake_queue(tp->dev);
        }

out_unlock:
        mmiowb();

        dev->trans_start = jiffies;

        return NETDEV_TX_OK;
}
4093
4094 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4095                                int new_mtu)
4096 {
4097         dev->mtu = new_mtu;
4098
4099         if (new_mtu > ETH_DATA_LEN) {
4100                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4101                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4102                         ethtool_op_set_tso(dev, 0);
4103                 }
4104                 else
4105                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4106         } else {
4107                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4108                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4109                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
4110         }
4111 }
4112
4113 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4114 {
4115         struct tg3 *tp = netdev_priv(dev);
4116         int err;
4117
4118         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
4119                 return -EINVAL;
4120
4121         if (!netif_running(dev)) {
4122                 /* We'll just catch it later when the
4123                  * device is up'd.
4124                  */
4125                 tg3_set_mtu(dev, tp, new_mtu);
4126                 return 0;
4127         }
4128
4129         tg3_netif_stop(tp);
4130
4131         tg3_full_lock(tp, 1);
4132
4133         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4134
4135         tg3_set_mtu(dev, tp, new_mtu);
4136
4137         err = tg3_restart_hw(tp, 0);
4138
4139         if (!err)
4140                 tg3_netif_start(tp);
4141
4142         tg3_full_unlock(tp);
4143
4144         return err;
4145 }
4146
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
        struct ring_info *rxp;
        int i;

        /* Unmap and free every posted standard-ring rx buffer. */
        for (i = 0; i < TG3_RX_RING_SIZE; i++) {
                rxp = &tp->rx_std_buffers[i];

                if (rxp->skb == NULL)
                        continue;
                pci_unmap_single(tp->pdev,
                                 pci_unmap_addr(rxp, mapping),
                                 tp->rx_pkt_buf_sz - tp->rx_offset,
                                 PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(rxp->skb);
                rxp->skb = NULL;
        }

        /* Same for the jumbo ring; these use the fixed jumbo size. */
        for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
                rxp = &tp->rx_jumbo_buffers[i];

                if (rxp->skb == NULL)
                        continue;
                pci_unmap_single(tp->pdev,
                                 pci_unmap_addr(rxp, mapping),
                                 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
                                 PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(rxp->skb);
                rxp->skb = NULL;
        }

        /* Tx ring: each skb occupies one slot for its linear head plus
         * one slot per page fragment, so i advances across all of them
         * before the skb itself is freed.
         */
        for (i = 0; i < TG3_TX_RING_SIZE; ) {
                struct tx_ring_info *txp;
                struct sk_buff *skb;
                int j;

                txp = &tp->tx_buffers[i];
                skb = txp->skb;

                if (skb == NULL) {
                        i++;
                        continue;
                }

                pci_unmap_single(tp->pdev,
                                 pci_unmap_addr(txp, mapping),
                                 skb_headlen(skb),
                                 PCI_DMA_TODEVICE);
                txp->skb = NULL;

                i++;

                for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
                        /* Mask handles an skb whose slots wrap the ring. */
                        txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
                        pci_unmap_page(tp->pdev,
                                       pci_unmap_addr(txp, mapping),
                                       skb_shinfo(skb)->frags[j].size,
                                       PCI_DMA_TODEVICE);
                        i++;
                }

                dev_kfree_skb_any(skb);
        }
}
4218
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0 on success or -ENOMEM when no rx buffers at all could
 * be allocated for a required ring.
 */
static int tg3_init_rings(struct tg3 *tp)
{
        u32 i;

        /* Free up all the SKBs. */
        tg3_free_rings(tp);

        /* Zero out all descriptors. */
        memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
        memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
        memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
        memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);

        /* 5780-class chips have no jumbo ring; they use jumbo-sized
         * buffers in the standard ring when the MTU requires it.
         */
        tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
        if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
            (tp->dev->mtu > ETH_DATA_LEN))
                tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;

        /* Initialize invariants of the rings, we only set this
         * stuff once.  This works because the card does not
         * write into the rx buffer posting rings.
         */
        for (i = 0; i < TG3_RX_RING_SIZE; i++) {
                struct tg3_rx_buffer_desc *rxd;

                rxd = &tp->rx_std[i];
                rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
                        << RXD_LEN_SHIFT;
                rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
                rxd->opaque = (RXD_OPAQUE_RING_STD |
                               (i << RXD_OPAQUE_INDEX_SHIFT));
        }

        if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
                for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
                        struct tg3_rx_buffer_desc *rxd;

                        rxd = &tp->rx_jumbo[i];
                        rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
                                << RXD_LEN_SHIFT;
                        rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
                                RXD_FLAG_JUMBO;
                        rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
                               (i << RXD_OPAQUE_INDEX_SHIFT));
                }
        }

        /* Now allocate fresh SKBs for each rx ring.  If fewer than
         * requested can be allocated, shrink the ring to what we got;
         * fail only when nothing at all could be allocated.
         */
        for (i = 0; i < tp->rx_pending; i++) {
                if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
                        printk(KERN_WARNING PFX
                               "%s: Using a smaller RX standard ring, "
                               "only %d out of %d buffers were allocated "
                               "successfully.\n",
                               tp->dev->name, i, tp->rx_pending);
                        if (i == 0)
                                return -ENOMEM;
                        tp->rx_pending = i;
                        break;
                }
        }

        if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
                for (i = 0; i < tp->rx_jumbo_pending; i++) {
                        if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
                                             -1, i) < 0) {
                                printk(KERN_WARNING PFX
                                       "%s: Using a smaller RX jumbo ring, "
                                       "only %d out of %d buffers were "
                                       "allocated successfully.\n",
                                       tp->dev->name, i, tp->rx_jumbo_pending);
                                if (i == 0) {
                                        tg3_free_rings(tp);
                                        return -ENOMEM;
                                }
                                tp->rx_jumbo_pending = i;
                                break;
                        }
                }
        }
        return 0;
}
4308
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.
 *
 * Releases everything tg3_alloc_consistent() set up.  Every pointer
 * is checked and NULLed, so this is safe to call on a device that was
 * only partially allocated (it is the err_out path of the allocator).
 */
static void tg3_free_consistent(struct tg3 *tp)
{
        /* rx_jumbo_buffers and tx_buffers are carved out of this one
         * kmalloc'd block, so a single kfree releases all three.
         */
        kfree(tp->rx_std_buffers);
        tp->rx_std_buffers = NULL;
        if (tp->rx_std) {
                pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
                                    tp->rx_std, tp->rx_std_mapping);
                tp->rx_std = NULL;
        }
        if (tp->rx_jumbo) {
                pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
                                    tp->rx_jumbo, tp->rx_jumbo_mapping);
                tp->rx_jumbo = NULL;
        }
        if (tp->rx_rcb) {
                pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
                                    tp->rx_rcb, tp->rx_rcb_mapping);
                tp->rx_rcb = NULL;
        }
        if (tp->tx_ring) {
                pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
                        tp->tx_ring, tp->tx_desc_mapping);
                tp->tx_ring = NULL;
        }
        if (tp->hw_status) {
                pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
                                    tp->hw_status, tp->status_mapping);
                tp->hw_status = NULL;
        }
        if (tp->hw_stats) {
                pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
                                    tp->hw_stats, tp->stats_mapping);
                tp->hw_stats = NULL;
        }
}
4348
4349 /*
4350  * Must not be invoked with interrupt sources disabled and
4351  * the hardware shutdown down.  Can sleep.
4352  */
4353 static int tg3_alloc_consistent(struct tg3 *tp)
4354 {
4355         tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
4356                                       (TG3_RX_RING_SIZE +
4357                                        TG3_RX_JUMBO_RING_SIZE)) +
4358                                      (sizeof(struct tx_ring_info) *
4359                                       TG3_TX_RING_SIZE),
4360                                      GFP_KERNEL);
4361         if (!tp->rx_std_buffers)
4362                 return -ENOMEM;
4363
4364         memset(tp->rx_std_buffers, 0,
4365                (sizeof(struct ring_info) *
4366                 (TG3_RX_RING_SIZE +
4367                  TG3_RX_JUMBO_RING_SIZE)) +
4368                (sizeof(struct tx_ring_info) *
4369                 TG3_TX_RING_SIZE));
4370
4371         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4372         tp->tx_buffers = (struct tx_ring_info *)
4373                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4374
4375         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4376                                           &tp->rx_std_mapping);
4377         if (!tp->rx_std)
4378                 goto err_out;
4379
4380         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4381                                             &tp->rx_jumbo_mapping);
4382
4383         if (!tp->rx_jumbo)
4384                 goto err_out;
4385
4386         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4387                                           &tp->rx_rcb_mapping);
4388         if (!tp->rx_rcb)
4389                 goto err_out;
4390
4391         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4392                                            &tp->tx_desc_mapping);
4393         if (!tp->tx_ring)
4394                 goto err_out;
4395
4396         tp->hw_status = pci_alloc_consistent(tp->pdev,
4397                                              TG3_HW_STATUS_SIZE,
4398                                              &tp->status_mapping);
4399         if (!tp->hw_status)
4400                 goto err_out;
4401
4402         tp->hw_stats = pci_alloc_consistent(tp->pdev,
4403                                             sizeof(struct tg3_hw_stats),
4404                                             &tp->stats_mapping);
4405         if (!tp->hw_stats)
4406                 goto err_out;
4407
4408         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4409         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4410
4411         return 0;
4412
4413 err_out:
4414         tg3_free_consistent(tp);
4415         return -ENOMEM;
4416 }
4417
4418 #define MAX_WAIT_CNT 1000
4419
4420 /* To stop a block, clear the enable bit and poll till it
4421  * clears.  tp->lock is held.
4422  */
4423 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4424 {
4425         unsigned int i;
4426         u32 val;
4427
4428         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4429                 switch (ofs) {
4430                 case RCVLSC_MODE:
4431                 case DMAC_MODE:
4432                 case MBFREE_MODE:
4433                 case BUFMGR_MODE:
4434                 case MEMARB_MODE:
4435                         /* We can't enable/disable these bits of the
4436                          * 5705/5750, just say success.
4437                          */
4438                         return 0;
4439
4440                 default:
4441                         break;
4442                 };
4443         }
4444
4445         val = tr32(ofs);
4446         val &= ~enable_bit;
4447         tw32_f(ofs, val);
4448
4449         for (i = 0; i < MAX_WAIT_CNT; i++) {
4450                 udelay(100);
4451                 val = tr32(ofs);
4452                 if ((val & enable_bit) == 0)
4453                         break;
4454         }
4455
4456         if (i == MAX_WAIT_CNT && !silent) {
4457                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4458                        "ofs=%lx enable_bit=%x\n",
4459                        ofs, enable_bit);
4460                 return -ENODEV;
4461         }
4462
4463         return 0;
4464 }
4465
/* tp->lock is held.
 *
 * Orderly shutdown of the chip's DMA and MAC engines: rx side first
 * (so no new frames flow in), then tx, then the host coalescing and
 * memory-arbitration blocks.  Returns 0 or an accumulated error.
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
        int i, err;

        tg3_disable_ints(tp);

        /* Stop the receive MAC first so nothing new enters the
         * rx engines while they are being disabled.
         */
        tp->rx_mode &= ~RX_MODE_ENABLE;
        tw32_f(MAC_RX_MODE, tp->rx_mode);
        udelay(10);

        /* Disable the rx-side engines. */
        err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

        /* Disable the tx-side engines.
         * NOTE(review): err |= (negative errno) only preserves the fact
         * that *some* stop failed, not which one -- callers treat it as
         * a boolean failure indicator.
         */
        err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

        tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        /* Disable the transmit MAC and poll for it to quiesce. */
        tp->tx_mode &= ~TX_MODE_ENABLE;
        tw32_f(MAC_TX_MODE, tp->tx_mode);

        for (i = 0; i < MAX_WAIT_CNT; i++) {
                udelay(100);
                if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
                        break;
        }
        if (i >= MAX_WAIT_CNT) {
                printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
                       "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
                       tp->dev->name, tr32(MAC_TX_MODE));
                err |= -ENODEV;
        }

        err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

        /* Pulse-reset all the FTQ queues. */
        tw32(FTQ_RESET, 0xffffffff);
        tw32(FTQ_RESET, 0x00000000);

        err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

        /* Host-side copies of chip-written blocks are now stale. */
        if (tp->hw_status)
                memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
        if (tp->hw_stats)
                memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

        return err;
}
4528
4529 /* tp->lock is held. */
4530 static int tg3_nvram_lock(struct tg3 *tp)
4531 {
4532         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4533                 int i;
4534
4535                 if (tp->nvram_lock_cnt == 0) {
4536                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4537                         for (i = 0; i < 8000; i++) {
4538                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4539                                         break;
4540                                 udelay(20);
4541                         }
4542                         if (i == 8000) {
4543                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4544                                 return -ENODEV;
4545                         }
4546                 }
4547                 tp->nvram_lock_cnt++;
4548         }
4549         return 0;
4550 }
4551
4552 /* tp->lock is held. */
4553 static void tg3_nvram_unlock(struct tg3 *tp)
4554 {
4555         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4556                 if (tp->nvram_lock_cnt > 0)
4557                         tp->nvram_lock_cnt--;
4558                 if (tp->nvram_lock_cnt == 0)
4559                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4560         }
4561 }
4562
4563 /* tp->lock is held. */
4564 static void tg3_enable_nvram_access(struct tg3 *tp)
4565 {
4566         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4567             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4568                 u32 nvaccess = tr32(NVRAM_ACCESS);
4569
4570                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4571         }
4572 }
4573
4574 /* tp->lock is held. */
4575 static void tg3_disable_nvram_access(struct tg3 *tp)
4576 {
4577         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4578             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4579                 u32 nvaccess = tr32(NVRAM_ACCESS);
4580
4581                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4582         }
4583 }
4584
4585 /* tp->lock is held. */
4586 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4587 {
4588         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4589                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4590
4591         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4592                 switch (kind) {
4593                 case RESET_KIND_INIT:
4594                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4595                                       DRV_STATE_START);
4596                         break;
4597
4598                 case RESET_KIND_SHUTDOWN:
4599                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4600                                       DRV_STATE_UNLOAD);
4601                         break;
4602
4603                 case RESET_KIND_SUSPEND:
4604                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4605                                       DRV_STATE_SUSPEND);
4606                         break;
4607
4608                 default:
4609                         break;
4610                 };
4611         }
4612 }
4613
4614 /* tp->lock is held. */
4615 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4616 {
4617         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4618                 switch (kind) {
4619                 case RESET_KIND_INIT:
4620                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4621                                       DRV_STATE_START_DONE);
4622                         break;
4623
4624                 case RESET_KIND_SHUTDOWN:
4625                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4626                                       DRV_STATE_UNLOAD_DONE);
4627                         break;
4628
4629                 default:
4630                         break;
4631                 };
4632         }
4633 }
4634
4635 /* tp->lock is held. */
4636 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4637 {
4638         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4639                 switch (kind) {
4640                 case RESET_KIND_INIT:
4641                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4642                                       DRV_STATE_START);
4643                         break;
4644
4645                 case RESET_KIND_SHUTDOWN:
4646                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4647                                       DRV_STATE_UNLOAD);
4648                         break;
4649
4650                 case RESET_KIND_SUSPEND:
4651                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4652                                       DRV_STATE_SUSPEND);
4653                         break;
4654
4655                 default:
4656                         break;
4657                 };
4658         }
4659 }
4660
4661 static void tg3_stop_fw(struct tg3 *);
4662
/* tp->lock is held.
 *
 * Perform a full CORECLK chip reset and bring the device back to a
 * state where register and config-space accesses work again, then
 * re-probe the ASF-enable state from device SRAM (the reset clears
 * the cached flags).  Always returns 0 at present.
 *
 * The exact ordering of register writes, config-space accesses and
 * delays below is hardware-mandated; do not reorder.
 */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int i;

	/* Take the NVRAM arbitration lock so bootcode is not
	 * mid-access when the chip is reset.
	 */
	tg3_nvram_lock(tp);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* NOTE(review): 0x7e2c and bit 29 below are undocumented
		 * PCIE workaround registers/bits on 575X parts — exact
		 * semantics not derivable from this file.
		 */
		if (tr32(0x7e2c) == 0x60) {
			tw32(0x7e2c, 0x20);
		}
		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	/* Keep the GPHY powered across the reset on 5705+ parts. */
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliable (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			int i;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (i = 0; i < 5000; i++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}
		/* Set PCIE max payload size and clear error status.  */
		pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
	}

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	/* The reset clobbered PCI config space; restore the saved copy. */
	pci_restore_state(tp->pdev);

	/* Make sure PCI-X relaxed ordering bit is clear. */
	pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
	val &= ~PCIX_CAPS_RELAXED_ORDERING;
	pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);

	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
		u32 val;

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}

		val = tr32(MEMARB_MODE);
		tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	} else
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		/* NOTE(review): 0xc4 bit 15 — undocumented 5705 A0
		 * workaround; meaning not visible from this file.
		 */
		u32 val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	/* Restore the MAC port mode appropriate for the PHY type. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else
		tw32_f(MAC_MODE, 0);
	udelay(40);

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 &&
	    !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
		tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;

		printk(KERN_INFO PFX "%s: No firmware running.\n",
		       tp->dev->name);
	}

	if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
		/* NOTE(review): 0x7c00 bit 25 — undocumented PCIE
		 * post-reset fixup; not applied on 5750 A0.
		 */
		u32 val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	/* Reprobe ASF enable state.  */
	tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
	tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
	}

	return 0;
}
4873
/* tp->lock is held.
 *
 * If ASF is enabled, ask the on-chip firmware to pause: post a
 * PAUSE_FW command in the firmware command mailbox, raise the RX CPU
 * event bit (bit 14 — driver-to-firmware event), and poll briefly
 * (up to ~100us) for the firmware to acknowledge by clearing it.
 * Falls through silently on timeout.
 */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
		u32 val;
		int i;

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
		/* Ring the doorbell: bit 14 tells the RX CPU a driver
		 * event is pending.
		 */
		val = tr32(GRC_RX_CPU_EVENT);
		val |= (1 << 14);
		tw32(GRC_RX_CPU_EVENT, val);

		/* Wait for RX cpu to ACK the event.  */
		for (i = 0; i < 100; i++) {
			if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
				break;
			udelay(1);
		}
	}
}
4894
/* tp->lock is held.
 *
 * Full device halt: pause the ASF firmware, signal the impending
 * reset (kind is a RESET_KIND_* value), quiesce the hardware
 * (optionally silencing abort warnings) and reset the chip, then
 * post the legacy and new-handshake completion signatures.
 *
 * Returns the tg3_chip_reset() result.  The post-reset signatures
 * are written even on failure so the firmware handshake state stays
 * consistent, matching the original behavior.
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	return err;
}
4915
4916 #define TG3_FW_RELEASE_MAJOR    0x0
4917 #define TG3_FW_RELASE_MINOR     0x0
4918 #define TG3_FW_RELEASE_FIX      0x0
4919 #define TG3_FW_START_ADDR       0x08000000
4920 #define TG3_FW_TEXT_ADDR        0x08000000
4921 #define TG3_FW_TEXT_LEN         0x9c0
4922 #define TG3_FW_RODATA_ADDR      0x080009c0
4923 #define TG3_FW_RODATA_LEN       0x60
4924 #define TG3_FW_DATA_ADDR        0x08000a40
4925 #define TG3_FW_DATA_LEN         0x20
4926 #define TG3_FW_SBSS_ADDR        0x08000a60
4927 #define TG3_FW_SBSS_LEN         0xc
4928 #define TG3_FW_BSS_ADDR         0x08000a70
4929 #define TG3_FW_BSS_LEN          0x10
4930
/* .text section of the 5701 A0 workaround firmware image (raw 32-bit
 * words; from the disassembly patterns these look like MIPS
 * instructions for the on-chip CPU).  Loaded into the RX and TX CPU
 * scratch memory by tg3_load_5701_a0_firmware_fix().  Do not edit.
 */
static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
	0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
	0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
	0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
	0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
	0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
	0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
	0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
	0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
	0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
	0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
	0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
	0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
	0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
	0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
	0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
	0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
	0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
	0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
	0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
	0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
	0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0,
	0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
	0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
	0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
	0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
	0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
	0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
	0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
	0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
	0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
	0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
	0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
	0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
	0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
	0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
	0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
	0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
	0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
	0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
	0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
	0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
	0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
	0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
	0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
	0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
	0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
	0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
	0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
	0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
	0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
	0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
	0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
	0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
	0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
	0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
	0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
	0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
	0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
	0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
	0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
	0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
	0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
	0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
	0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
	0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
	0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
	0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
	0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
	0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
	0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
	0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
};
5024
/* .rodata section of the 5701 A0 workaround firmware image.  The
 * words encode ASCII message strings used by the firmware (e.g.
 * 0x35373031 is "5701").  Do not edit.
 */
static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
	0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
	0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
	0x00000000
};
5032
#if 0 /* All zeros, don't eat up space with it. */
/* .data section of the 5701 A0 firmware image, kept for reference
 * only; the loader zero-fills it (data_data is passed as NULL).
 */
u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000
};
#endif
5039
5040 #define RX_CPU_SCRATCH_BASE     0x30000
5041 #define RX_CPU_SCRATCH_SIZE     0x04000
5042 #define TX_CPU_SCRATCH_BASE     0x34000
5043 #define TX_CPU_SCRATCH_SIZE     0x04000
5044
/* tp->lock is held.
 *
 * Halt one of the on-chip CPUs; "offset" is RX_CPU_BASE or
 * TX_CPU_BASE.  5705+ parts have no TX CPU, hence the BUG_ON.
 * Returns 0 on success, -ENODEV if the CPU never reports halted
 * after 10000 attempts.
 */
static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
{
	int i;

	BUG_ON(offset == TX_CPU_BASE &&
	    (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));

	if (offset == RX_CPU_BASE) {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}

		/* NOTE(review): only the RX CPU gets this extra flushed
		 * halt write plus settle delay — presumably a hardware
		 * quirk; the reason is not visible from this file.
		 */
		tw32(offset + CPU_STATE, 0xffffffff);
		tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
		udelay(10);
	} else {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}
	}

	if (i >= 10000) {
		printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
		       "and %s CPU\n",
		       tp->dev->name,
		       (offset == RX_CPU_BASE ? "RX" : "TX"));
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}
5086
/* Describes one firmware image to be loaded into an on-chip CPU:
 * the .text, .rodata and .data sections with their linked load
 * addresses and lengths in bytes.  A NULL *_data pointer makes
 * tg3_load_firmware_cpu() zero-fill that section instead.
 */
struct fw_info {
	unsigned int text_base;		/* linked address of .text */
	unsigned int text_len;		/* .text length in bytes */
	const u32 *text_data;		/* .text words, or NULL for zeros */
	unsigned int rodata_base;	/* linked address of .rodata */
	unsigned int rodata_len;	/* .rodata length in bytes */
	const u32 *rodata_data;		/* .rodata words, or NULL for zeros */
	unsigned int data_base;		/* linked address of .data */
	unsigned int data_len;		/* .data length in bytes */
	const u32 *data_data;		/* .data words, or NULL for zeros */
};
5098
5099 /* tp->lock is held. */
5100 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5101                                  int cpu_scratch_size, struct fw_info *info)
5102 {
5103         int err, lock_err, i;
5104         void (*write_op)(struct tg3 *, u32, u32);
5105
5106         if (cpu_base == TX_CPU_BASE &&
5107             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5108                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5109                        "TX cpu firmware on %s which is 5705.\n",
5110                        tp->dev->name);
5111                 return -EINVAL;
5112         }
5113
5114         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5115                 write_op = tg3_write_mem;
5116         else
5117                 write_op = tg3_write_indirect_reg32;
5118
5119         /* It is possible that bootcode is still loading at this point.
5120          * Get the nvram lock first before halting the cpu.
5121          */
5122         lock_err = tg3_nvram_lock(tp);
5123         err = tg3_halt_cpu(tp, cpu_base);
5124         if (!lock_err)
5125                 tg3_nvram_unlock(tp);
5126         if (err)
5127                 goto out;
5128
5129         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5130                 write_op(tp, cpu_scratch_base + i, 0);
5131         tw32(cpu_base + CPU_STATE, 0xffffffff);
5132         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5133         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5134                 write_op(tp, (cpu_scratch_base +
5135                               (info->text_base & 0xffff) +
5136                               (i * sizeof(u32))),
5137                          (info->text_data ?
5138                           info->text_data[i] : 0));
5139         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5140                 write_op(tp, (cpu_scratch_base +
5141                               (info->rodata_base & 0xffff) +
5142                               (i * sizeof(u32))),
5143                          (info->rodata_data ?
5144                           info->rodata_data[i] : 0));
5145         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5146                 write_op(tp, (cpu_scratch_base +
5147                               (info->data_base & 0xffff) +
5148                               (i * sizeof(u32))),
5149                          (info->data_data ?
5150                           info->data_data[i] : 0));
5151
5152         err = 0;
5153
5154 out:
5155         return err;
5156 }
5157
5158 /* tp->lock is held. */
5159 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5160 {
5161         struct fw_info info;
5162         int err, i;
5163
5164         info.text_base = TG3_FW_TEXT_ADDR;
5165         info.text_len = TG3_FW_TEXT_LEN;
5166         info.text_data = &tg3FwText[0];
5167         info.rodata_base = TG3_FW_RODATA_ADDR;
5168         info.rodata_len = TG3_FW_RODATA_LEN;
5169         info.rodata_data = &tg3FwRodata[0];
5170         info.data_base = TG3_FW_DATA_ADDR;
5171         info.data_len = TG3_FW_DATA_LEN;
5172         info.data_data = NULL;
5173
5174         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5175                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5176                                     &info);
5177         if (err)
5178                 return err;
5179
5180         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5181                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5182                                     &info);
5183         if (err)
5184                 return err;
5185
5186         /* Now startup only the RX cpu. */
5187         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5188         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5189
5190         for (i = 0; i < 5; i++) {
5191                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5192                         break;
5193                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5194                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
5195                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5196                 udelay(1000);
5197         }
5198         if (i >= 5) {
5199                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
5200                        "to set RX CPU PC, is %08x should be %08x\n",
5201                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
5202                        TG3_FW_TEXT_ADDR);
5203                 return -ENODEV;
5204         }
5205         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5206         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
5207
5208         return 0;
5209 }
5210
5211 #if TG3_TSO_SUPPORT != 0
5212
5213 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
5214 #define TG3_TSO_FW_RELASE_MINOR         0x6
5215 #define TG3_TSO_FW_RELEASE_FIX          0x0
5216 #define TG3_TSO_FW_START_ADDR           0x08000000
5217 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
5218 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
5219 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
5220 #define TG3_TSO_FW_RODATA_LEN           0x60
5221 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
5222 #define TG3_TSO_FW_DATA_LEN             0x30
5223 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
5224 #define TG3_TSO_FW_SBSS_LEN             0x2c
5225 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
5226 #define TG3_TSO_FW_BSS_LEN              0x894
5227
5228 static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5229         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5230         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5231         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5232         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5233         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5234         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5235         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5236         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5237         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5238         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5239         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5240         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5241         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5242         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5243         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5244         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5245         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5246         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5247         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5248         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5249         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5250         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5251         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5252         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5253         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5254         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5255         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5256         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5257         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5258         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5259         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5260         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5261         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5262         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5263         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5264         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5265         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5266         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5267         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5268         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5269         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5270         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5271         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5272         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5273         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5274         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5275         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5276         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5277         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5278         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5279         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5280         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5281         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5282         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5283         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5284         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5285         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5286         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5287         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5288         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5289         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5290         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5291         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5292         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5293         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5294         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5295         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5296         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5297         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5298         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5299         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5300         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5301         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5302         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5303         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5304         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5305         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5306         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5307         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5308         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5309         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5310         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5311         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5312         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5313         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5314         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5315         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5316         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5317         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5318         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5319         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5320         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5321         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5322         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5323         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5324         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5325         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5326         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5327         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5328         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5329         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5330         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5331         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5332         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5333         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5334         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5335         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5336         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5337         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5338         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5339         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5340         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5341         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5342         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5343         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5344         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5345         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5346         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5347         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5348         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5349         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5350         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5351         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5352         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5353         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5354         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5355         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5356         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5357         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5358         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5359         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5360         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5361         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5362         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5363         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5364         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5365         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5366         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5367         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5368         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5369         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5370         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5371         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5372         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5373         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5374         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5375         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5376         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5377         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5378         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5379         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5380         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5381         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5382         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5383         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5384         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5385         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5386         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5387         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5388         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5389         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5390         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5391         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5392         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5393         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5394         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5395         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5396         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5397         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5398         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5399         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5400         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5401         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5402         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5403         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5404         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5405         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5406         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5407         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5408         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5409         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5410         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5411         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5412         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5413         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5414         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5415         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5416         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5417         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5418         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5419         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5420         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5421         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5422         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5423         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5424         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5425         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5426         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5427         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5428         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5429         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5430         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5431         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5432         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5433         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5434         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5435         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5436         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5437         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5438         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5439         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5440         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5441         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5442         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5443         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5444         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5445         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5446         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5447         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5448         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5449         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5450         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5451         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5452         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5453         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5454         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5455         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5456         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5457         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5458         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5459         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5460         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5461         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5462         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5463         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5464         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5465         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5466         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5467         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5468         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5469         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5470         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5471         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5472         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5473         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5474         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5475         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5476         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5477         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5478         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5479         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5480         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5481         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5482         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5483         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5484         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5485         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5486         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5487         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5488         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5489         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5490         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5491         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5492         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5493         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5494         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5495         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5496         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5497         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5498         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5499         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5500         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5501         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5502         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5503         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5504         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5505         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5506         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5507         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5508         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5509         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5510         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5511         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5512         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5513 };
5514
/*
 * Read-only data segment of the TSO firmware image, loaded verbatim into
 * the chip's RODATA region (see TG3_TSO_FW_RODATA_ADDR/LEN users).
 * When read as big-endian ASCII the words decode to short tag strings --
 * "MainCpuB", "MainCpuA", "stkoffldIn", "stkoff**", "SwEvent0",
 * "fatalErr" -- NOTE(review): decoded directly from the hex; their use
 * (likely firmware-internal event/diagnostic labels) is inferred, confirm
 * against the firmware sources.  Do not edit: this is opaque firmware data.
 */
static const u32 tg3TsoFwRodata[] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
	0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
	0x00000000,
};
5522
/*
 * Initialized-data segment of the TSO firmware image, loaded verbatim
 * into the chip's DATA region (see TG3_TSO_FW_DATA_ADDR/LEN users).
 * Read as big-endian ASCII the non-zero words spell "stkoffld_v1.6.0" --
 * NOTE(review): decoded from the hex; presumably the firmware's version
 * string ("stack offload v1.6.0"), confirm against the firmware sources.
 * Do not edit: this is opaque firmware data.
 */
static const u32 tg3TsoFwData[] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000,
};
5528
/* 5705 needs a special version of the TSO firmware.  */
/*
 * Version and on-chip memory layout of the 5705 TSO firmware image.
 * The image is linked at 0x00010000: RODATA immediately follows TEXT
 * (0x00010000 + 0xe90 == 0x00010e90); DATA, SBSS and BSS follow with
 * small gaps -- presumably linker alignment padding, not meaningful data.
 */
#define TG3_TSO5_FW_RELEASE_MAJOR	0x1
/* Historical misspelling ("RELASE") kept because existing code references
 * it by this name; use the correctly spelled alias below in new code. */
#define TG3_TSO5_FW_RELASE_MINOR	0x2
#define TG3_TSO5_FW_RELEASE_MINOR	TG3_TSO5_FW_RELASE_MINOR
#define TG3_TSO5_FW_RELEASE_FIX		0x0
#define TG3_TSO5_FW_START_ADDR		0x00010000
#define TG3_TSO5_FW_TEXT_ADDR		0x00010000
#define TG3_TSO5_FW_TEXT_LEN		0xe90
#define TG3_TSO5_FW_RODATA_ADDR		0x00010e90
#define TG3_TSO5_FW_RODATA_LEN		0x50
#define TG3_TSO5_FW_DATA_ADDR		0x00010f00
#define TG3_TSO5_FW_DATA_LEN		0x20
#define TG3_TSO5_FW_SBSS_ADDR		0x00010f20
#define TG3_TSO5_FW_SBSS_LEN		0x28
#define TG3_TSO5_FW_BSS_ADDR		0x00010f50
#define TG3_TSO5_FW_BSS_LEN		0x88
5544
5545 static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5546         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5547         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5548         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5549         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5550         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5551         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5552         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5553         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5554         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5555         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5556         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5557         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5558         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5559         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5560         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5561         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5562         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5563         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5564         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5565         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5566         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5567         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5568         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5569         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5570         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5571         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5572         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5573         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5574         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5575         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5576         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5577         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5578         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5579         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5580         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5581         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5582         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
5583         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
5584         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
5585         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
5586         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
5587         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
5588         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
5589         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
5590         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
5591         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
5592         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
5593         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
5594         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
5595         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
5596         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
5597         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
5598         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
5599         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
5600         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
5601         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
5602         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
5603         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
5604         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
5605         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
5606         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
5607         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
5608         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
5609         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
5610         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
5611         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
5612         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5613         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
5614         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
5615         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
5616         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
5617         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
5618         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
5619         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
5620         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
5621         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
5622         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
5623         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
5624         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
5625         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
5626         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
5627         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
5628         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
5629         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
5630         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
5631         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
5632         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
5633         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
5634         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
5635         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
5636         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
5637         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
5638         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
5639         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
5640         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
5641         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
5642         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
5643         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
5644         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
5645         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
5646         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
5647         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
5648         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
5649         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
5650         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
5651         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
5652         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5653         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5654         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
5655         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
5656         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
5657         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
5658         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
5659         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
5660         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
5661         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
5662         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
5663         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5664         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5665         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
5666         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
5667         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
5668         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
5669         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5670         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
5671         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
5672         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
5673         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
5674         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
5675         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
5676         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
5677         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
5678         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
5679         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
5680         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
5681         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
5682         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
5683         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
5684         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
5685         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
5686         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
5687         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
5688         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
5689         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
5690         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
5691         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
5692         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
5693         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5694         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
5695         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
5696         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
5697         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5698         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
5699         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
5700         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5701         0x00000000, 0x00000000, 0x00000000,
5702 };
5703
/* Read-only data segment of the 5705 TSO firmware image.  The hex words
 * are mostly ASCII diagnostic tags used by the firmware itself
 * ("MainCpuB", "MainCpuA", "stkoffld", "fatalErr").  Loaded into NIC
 * SRAM at TG3_TSO5_FW_RODATA_ADDR by tg3_load_tso_firmware().
 */
static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
	0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x00000000,
};
5710
/* Initialized data segment of the 5705 TSO firmware image; the ASCII
 * payload embeds the firmware version string "stkoffld_v1.2.0".
 * Loaded into NIC SRAM at TG3_TSO5_FW_DATA_ADDR by
 * tg3_load_tso_firmware().
 */
static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000,
};
5715
/* tp->lock is held. */
/*
 * Download the TSO offload firmware into the appropriate on-chip CPU
 * and start it executing at the firmware's text base address.
 *
 * Chips that implement TSO in hardware (TG3_FLG2_HW_TSO) need no
 * firmware at all, so the function is a no-op for them.  On the 5705
 * the firmware runs on the RX CPU and its scratch area is carved out
 * of the MBUF pool SRAM; on other chips it runs on the TX CPU using
 * the dedicated TX scratch region.
 *
 * Returns 0 on success, the error from tg3_load_firmware_cpu() if the
 * download fails, or -ENODEV if the CPU never latches the new program
 * counter.
 */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	struct fw_info info;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err, i;

	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
		return 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		/* 5705 variant: image lives at the TSO5 addresses and the
		 * scratch region must cover every firmware segment.
		 */
		info.text_base = TG3_TSO5_FW_TEXT_ADDR;
		info.text_len = TG3_TSO5_FW_TEXT_LEN;
		info.text_data = &tg3Tso5FwText[0];
		info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
		info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
		info.rodata_data = &tg3Tso5FwRodata[0];
		info.data_base = TG3_TSO5_FW_DATA_ADDR;
		info.data_len = TG3_TSO5_FW_DATA_LEN;
		info.data_data = &tg3Tso5FwData[0];
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
		/* Scratch size = text + rodata + data + sbss + bss. */
		cpu_scratch_size = (info.text_len +
				    info.rodata_len +
				    info.data_len +
				    TG3_TSO5_FW_SBSS_LEN +
				    TG3_TSO5_FW_BSS_LEN);
	} else {
		/* All other TSO-firmware chips use the TX CPU and its
		 * fixed scratch window.
		 */
		info.text_base = TG3_TSO_FW_TEXT_ADDR;
		info.text_len = TG3_TSO_FW_TEXT_LEN;
		info.text_data = &tg3TsoFwText[0];
		info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
		info.rodata_len = TG3_TSO_FW_RODATA_LEN;
		info.rodata_data = &tg3TsoFwRodata[0];
		info.data_base = TG3_TSO_FW_DATA_ADDR;
		info.data_len = TG3_TSO_FW_DATA_LEN;
		info.data_data = &tg3TsoFwData[0];
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    &info);
	if (err)
		return err;

	/* Now startup the cpu. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC,    info.text_base);

	/* Give the CPU up to five attempts (halting it each retry) to
	 * latch the new program counter before declaring failure.
	 */
	for (i = 0; i < 5; i++) {
		if (tr32(cpu_base + CPU_PC) == info.text_base)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC,    info.text_base);
		udelay(1000);
	}
	if (i >= 5) {
		printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
		       "to set CPU PC, is %08x should be %08x\n",
		       tp->dev->name, tr32(cpu_base + CPU_PC),
		       info.text_base);
		return -ENODEV;
	}
	/* Clear pending state and take the CPU out of halt so the
	 * firmware begins running.
	 */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
	return 0;
}
5787
5788 #endif /* TG3_TSO_SUPPORT != 0 */
5789
5790 /* tp->lock is held. */
5791 static void __tg3_set_mac_addr(struct tg3 *tp)
5792 {
5793         u32 addr_high, addr_low;
5794         int i;
5795
5796         addr_high = ((tp->dev->dev_addr[0] << 8) |
5797                      tp->dev->dev_addr[1]);
5798         addr_low = ((tp->dev->dev_addr[2] << 24) |
5799                     (tp->dev->dev_addr[3] << 16) |
5800                     (tp->dev->dev_addr[4] <<  8) |
5801                     (tp->dev->dev_addr[5] <<  0));
5802         for (i = 0; i < 4; i++) {
5803                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5804                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5805         }
5806
5807         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5808             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5809                 for (i = 0; i < 12; i++) {
5810                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5811                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5812                 }
5813         }
5814
5815         addr_high = (tp->dev->dev_addr[0] +
5816                      tp->dev->dev_addr[1] +
5817                      tp->dev->dev_addr[2] +
5818                      tp->dev->dev_addr[3] +
5819                      tp->dev->dev_addr[4] +
5820                      tp->dev->dev_addr[5]) &
5821                 TX_BACKOFF_SEED_MASK;
5822         tw32(MAC_TX_BACKOFF_SEED, addr_high);
5823 }
5824
/*
 * ndo-style MAC address change handler.  Validates the new address,
 * copies it into the netdevice, and (if the interface is running)
 * pushes it into the hardware.  Returns 0 on success, -EINVAL for an
 * invalid ethernet address, or the error from tg3_restart_hw().
 */
static int tg3_set_mac_addr(struct net_device *dev, void *p)
{
	struct tg3 *tp = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	/* If the device is down, the address will be programmed when it
	 * is next brought up.
	 */
	if (!netif_running(dev))
		return 0;

	if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
		/* Reset chip so that ASF can re-init any MAC addresses it
		 * needs.
		 */
		tg3_netif_stop(tp);
		tg3_full_lock(tp, 1);

		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 0);
		if (!err)
			tg3_netif_start(tp);
		tg3_full_unlock(tp);
	} else {
		/* No ASF: the MAC registers can be rewritten directly
		 * under tp->lock without a full chip reset.
		 */
		spin_lock_bh(&tp->lock);
		__tg3_set_mac_addr(tp);
		spin_unlock_bh(&tp->lock);
	}

	return err;
}
5859
5860 /* tp->lock is held. */
5861 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5862                            dma_addr_t mapping, u32 maxlen_flags,
5863                            u32 nic_addr)
5864 {
5865         tg3_write_mem(tp,
5866                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5867                       ((u64) mapping >> 32));
5868         tg3_write_mem(tp,
5869                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5870                       ((u64) mapping & 0xffffffff));
5871         tg3_write_mem(tp,
5872                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5873                        maxlen_flags);
5874
5875         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5876                 tg3_write_mem(tp,
5877                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5878                               nic_addr);
5879 }
5880
5881 static void __tg3_set_rx_mode(struct net_device *);
5882 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
5883 {
5884         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5885         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5886         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5887         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
5888         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5889                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5890                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5891         }
5892         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5893         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
5894         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5895                 u32 val = ec->stats_block_coalesce_usecs;
5896
5897                 if (!netif_carrier_ok(tp->dev))
5898                         val = 0;
5899
5900                 tw32(HOSTCC_STAT_COAL_TICKS, val);
5901         }
5902 }
5903
5904 /* tp->lock is held. */
5905 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
5906 {
5907         u32 val, rdmac_mode;
5908         int i, err, limit;
5909
5910         tg3_disable_ints(tp);
5911
5912         tg3_stop_fw(tp);
5913
5914         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5915
5916         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
5917                 tg3_abort_hw(tp, 1);
5918         }
5919
5920         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) && reset_phy)
5921                 tg3_phy_reset(tp);
5922
5923         err = tg3_chip_reset(tp);
5924         if (err)
5925                 return err;
5926
5927         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5928
5929         /* This works around an issue with Athlon chipsets on
5930          * B3 tigon3 silicon.  This bit has no effect on any
5931          * other revision.  But do not set this on PCI Express
5932          * chips.
5933          */
5934         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5935                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5936         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5937
5938         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5939             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5940                 val = tr32(TG3PCI_PCISTATE);
5941                 val |= PCISTATE_RETRY_SAME_DMA;
5942                 tw32(TG3PCI_PCISTATE, val);
5943         }
5944
5945         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5946                 /* Enable some hw fixes.  */
5947                 val = tr32(TG3PCI_MSI_DATA);
5948                 val |= (1 << 26) | (1 << 28) | (1 << 29);
5949                 tw32(TG3PCI_MSI_DATA, val);
5950         }
5951
5952         /* Descriptor ring init may make accesses to the
5953          * NIC SRAM area to setup the TX descriptors, so we
5954          * can only do this after the hardware has been
5955          * successfully reset.
5956          */
5957         err = tg3_init_rings(tp);
5958         if (err)
5959                 return err;
5960
5961         /* This value is determined during the probe time DMA
5962          * engine test, tg3_test_dma.
5963          */
5964         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5965
5966         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5967                           GRC_MODE_4X_NIC_SEND_RINGS |
5968                           GRC_MODE_NO_TX_PHDR_CSUM |
5969                           GRC_MODE_NO_RX_PHDR_CSUM);
5970         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
5971
5972         /* Pseudo-header checksum is done by hardware logic and not
5973          * the offload processers, so make the chip do the pseudo-
5974          * header checksums on receive.  For transmit it is more
5975          * convenient to do the pseudo-header checksum in software
5976          * as Linux does that on transmit for us in all cases.
5977          */
5978         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
5979
5980         tw32(GRC_MODE,
5981              tp->grc_mode |
5982              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5983
5984         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
5985         val = tr32(GRC_MISC_CFG);
5986         val &= ~0xff;
5987         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5988         tw32(GRC_MISC_CFG, val);
5989
5990         /* Initialize MBUF/DESC pool. */
5991         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
5992                 /* Do nothing.  */
5993         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5994                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5995                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5996                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5997                 else
5998                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5999                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6000                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
6001         }
6002 #if TG3_TSO_SUPPORT != 0
6003         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6004                 int fw_len;
6005
6006                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
6007                           TG3_TSO5_FW_RODATA_LEN +
6008                           TG3_TSO5_FW_DATA_LEN +
6009                           TG3_TSO5_FW_SBSS_LEN +
6010                           TG3_TSO5_FW_BSS_LEN);
6011                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
6012                 tw32(BUFMGR_MB_POOL_ADDR,
6013                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
6014                 tw32(BUFMGR_MB_POOL_SIZE,
6015                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
6016         }
6017 #endif
6018
6019         if (tp->dev->mtu <= ETH_DATA_LEN) {
6020                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6021                      tp->bufmgr_config.mbuf_read_dma_low_water);
6022                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6023                      tp->bufmgr_config.mbuf_mac_rx_low_water);
6024                 tw32(BUFMGR_MB_HIGH_WATER,
6025                      tp->bufmgr_config.mbuf_high_water);
6026         } else {
6027                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6028                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6029                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6030                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6031                 tw32(BUFMGR_MB_HIGH_WATER,
6032                      tp->bufmgr_config.mbuf_high_water_jumbo);
6033         }
6034         tw32(BUFMGR_DMA_LOW_WATER,
6035              tp->bufmgr_config.dma_low_water);
6036         tw32(BUFMGR_DMA_HIGH_WATER,
6037              tp->bufmgr_config.dma_high_water);
6038
6039         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
6040         for (i = 0; i < 2000; i++) {
6041                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6042                         break;
6043                 udelay(10);
6044         }
6045         if (i >= 2000) {
6046                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6047                        tp->dev->name);
6048                 return -ENODEV;
6049         }
6050
6051         /* Setup replenish threshold. */
6052         val = tp->rx_pending / 8;
6053         if (val == 0)
6054                 val = 1;
6055         else if (val > tp->rx_std_max_post)
6056                 val = tp->rx_std_max_post;
6057
6058         tw32(RCVBDI_STD_THRESH, val);
6059
6060         /* Initialize TG3_BDINFO's at:
6061          *  RCVDBDI_STD_BD:     standard eth size rx ring
6062          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
6063          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
6064          *
6065          * like so:
6066          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
6067          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
6068          *                              ring attribute flags
6069          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
6070          *
6071          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6072          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6073          *
6074          * The size of each ring is fixed in the firmware, but the location is
6075          * configurable.
6076          */
6077         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6078              ((u64) tp->rx_std_mapping >> 32));
6079         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6080              ((u64) tp->rx_std_mapping & 0xffffffff));
6081         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6082              NIC_SRAM_RX_BUFFER_DESC);
6083
6084         /* Don't even try to program the JUMBO/MINI buffer descriptor
6085          * configs on 5705.
6086          */
6087         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6088                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6089                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6090         } else {
6091                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6092                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6093
6094                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6095                      BDINFO_FLAGS_DISABLED);
6096
6097                 /* Setup replenish threshold. */
6098                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6099
6100                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
6101                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6102                              ((u64) tp->rx_jumbo_mapping >> 32));
6103                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6104                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6105                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6106                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6107                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6108                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6109                 } else {
6110                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6111                              BDINFO_FLAGS_DISABLED);
6112                 }
6113
6114         }
6115
6116         /* There is only one send ring on 5705/5750, no need to explicitly
6117          * disable the others.
6118          */
6119         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6120                 /* Clear out send RCB ring in SRAM. */
6121                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6122                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6123                                       BDINFO_FLAGS_DISABLED);
6124         }
6125
6126         tp->tx_prod = 0;
6127         tp->tx_cons = 0;
6128         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6129         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6130
6131         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6132                        tp->tx_desc_mapping,
6133                        (TG3_TX_RING_SIZE <<
6134                         BDINFO_FLAGS_MAXLEN_SHIFT),
6135                        NIC_SRAM_TX_BUFFER_DESC);
6136
6137         /* There is only one receive return ring on 5705/5750, no need
6138          * to explicitly disable the others.
6139          */
6140         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6141                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6142                      i += TG3_BDINFO_SIZE) {
6143                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6144                                       BDINFO_FLAGS_DISABLED);
6145                 }
6146         }
6147
6148         tp->rx_rcb_ptr = 0;
6149         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6150
6151         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6152                        tp->rx_rcb_mapping,
6153                        (TG3_RX_RCB_RING_SIZE(tp) <<
6154                         BDINFO_FLAGS_MAXLEN_SHIFT),
6155                        0);
6156
6157         tp->rx_std_ptr = tp->rx_pending;
6158         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6159                      tp->rx_std_ptr);
6160
6161         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
6162                                                 tp->rx_jumbo_pending : 0;
6163         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6164                      tp->rx_jumbo_ptr);
6165
6166         /* Initialize MAC address and backoff seed. */
6167         __tg3_set_mac_addr(tp);
6168
6169         /* MTU + ethernet header + FCS + optional VLAN tag */
6170         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6171
6172         /* The slot time is changed by tg3_setup_phy if we
6173          * run at gigabit with half duplex.
6174          */
6175         tw32(MAC_TX_LENGTHS,
6176              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6177              (6 << TX_LENGTHS_IPG_SHIFT) |
6178              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6179
6180         /* Receive rules. */
6181         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6182         tw32(RCVLPC_CONFIG, 0x0181);
6183
6184         /* Calculate RDMAC_MODE setting early, we need it to determine
6185          * the RCVLPC_STATE_ENABLE mask.
6186          */
6187         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6188                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6189                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6190                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6191                       RDMAC_MODE_LNGREAD_ENAB);
6192         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6193                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
6194
6195         /* If statement applies to 5705 and 5750 PCI devices only */
6196         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6197              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6198             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
6199                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6200                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6201                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6202                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6203                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6204                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6205                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6206                 }
6207         }
6208
6209         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6210                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6211
6212 #if TG3_TSO_SUPPORT != 0
6213         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6214                 rdmac_mode |= (1 << 27);
6215 #endif
6216
6217         /* Receive/send statistics. */
6218         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6219                 val = tr32(RCVLPC_STATS_ENABLE);
6220                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
6221                 tw32(RCVLPC_STATS_ENABLE, val);
6222         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6223                    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6224                 val = tr32(RCVLPC_STATS_ENABLE);
6225                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6226                 tw32(RCVLPC_STATS_ENABLE, val);
6227         } else {
6228                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6229         }
6230         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6231         tw32(SNDDATAI_STATSENAB, 0xffffff);
6232         tw32(SNDDATAI_STATSCTRL,
6233              (SNDDATAI_SCTRL_ENABLE |
6234               SNDDATAI_SCTRL_FASTUPD));
6235
6236         /* Setup host coalescing engine. */
6237         tw32(HOSTCC_MODE, 0);
6238         for (i = 0; i < 2000; i++) {
6239                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6240                         break;
6241                 udelay(10);
6242         }
6243
6244         __tg3_set_coalesce(tp, &tp->coal);
6245
6246         /* set status block DMA address */
6247         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6248              ((u64) tp->status_mapping >> 32));
6249         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6250              ((u64) tp->status_mapping & 0xffffffff));
6251
6252         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6253                 /* Status/statistics block address.  See tg3_timer,
6254                  * the tg3_periodic_fetch_stats call there, and
6255                  * tg3_get_stats to see how this works for 5705/5750 chips.
6256                  */
6257                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6258                      ((u64) tp->stats_mapping >> 32));
6259                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6260                      ((u64) tp->stats_mapping & 0xffffffff));
6261                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6262                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6263         }
6264
6265         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6266
6267         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6268         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6269         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6270                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6271
6272         /* Clear statistics/status block in chip, and status block in ram. */
6273         for (i = NIC_SRAM_STATS_BLK;
6274              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6275              i += sizeof(u32)) {
6276                 tg3_write_mem(tp, i, 0);
6277                 udelay(40);
6278         }
6279         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6280
6281         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6282                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6283                 /* reset to prevent losing 1st rx packet intermittently */
6284                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6285                 udelay(10);
6286         }
6287
6288         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6289                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6290         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6291         udelay(40);
6292
6293         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6294          * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
6295          * register to preserve the GPIO settings for LOMs. The GPIOs,
6296          * whether used as inputs or outputs, are set by boot code after
6297          * reset.
6298          */
6299         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
6300                 u32 gpio_mask;
6301
6302                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
6303                             GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
6304
6305                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6306                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6307                                      GRC_LCLCTRL_GPIO_OUTPUT3;
6308
6309                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6310                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6311
6312                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6313
6314                 /* GPIO1 must be driven high for eeprom write protect */
6315                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6316                                        GRC_LCLCTRL_GPIO_OUTPUT1);
6317         }
6318         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6319         udelay(100);
6320
6321         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
6322         tp->last_tag = 0;
6323
6324         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6325                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6326                 udelay(40);
6327         }
6328
6329         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6330                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6331                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6332                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6333                WDMAC_MODE_LNGREAD_ENAB);
6334
6335         /* If statement applies to 5705 and 5750 PCI devices only */
6336         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6337              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6338             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6339                 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
6340                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6341                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6342                         /* nothing */
6343                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6344                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6345                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6346                         val |= WDMAC_MODE_RX_ACCEL;
6347                 }
6348         }
6349
6350         /* Enable host coalescing bug fix */
6351         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
6352             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787))
6353                 val |= (1 << 29);
6354
6355         tw32_f(WDMAC_MODE, val);
6356         udelay(40);
6357
6358         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
6359                 val = tr32(TG3PCI_X_CAPS);
6360                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6361                         val &= ~PCIX_CAPS_BURST_MASK;
6362                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6363                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6364                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
6365                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6366                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6367                                 val |= (tp->split_mode_max_reqs <<
6368                                         PCIX_CAPS_SPLIT_SHIFT);
6369                 }
6370                 tw32(TG3PCI_X_CAPS, val);
6371         }
6372
6373         tw32_f(RDMAC_MODE, rdmac_mode);
6374         udelay(40);
6375
6376         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6377         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6378                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6379         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6380         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6381         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6382         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6383         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6384 #if TG3_TSO_SUPPORT != 0
6385         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6386                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6387 #endif
6388         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6389         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6390
6391         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6392                 err = tg3_load_5701_a0_firmware_fix(tp);
6393                 if (err)
6394                         return err;
6395         }
6396
6397 #if TG3_TSO_SUPPORT != 0
6398         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6399                 err = tg3_load_tso_firmware(tp);
6400                 if (err)
6401                         return err;
6402         }
6403 #endif
6404
6405         tp->tx_mode = TX_MODE_ENABLE;
6406         tw32_f(MAC_TX_MODE, tp->tx_mode);
6407         udelay(100);
6408
6409         tp->rx_mode = RX_MODE_ENABLE;
6410         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6411                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
6412
6413         tw32_f(MAC_RX_MODE, tp->rx_mode);
6414         udelay(10);
6415
6416         if (tp->link_config.phy_is_low_power) {
6417                 tp->link_config.phy_is_low_power = 0;
6418                 tp->link_config.speed = tp->link_config.orig_speed;
6419                 tp->link_config.duplex = tp->link_config.orig_duplex;
6420                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6421         }
6422
6423         tp->mi_mode = MAC_MI_MODE_BASE;
6424         tw32_f(MAC_MI_MODE, tp->mi_mode);
6425         udelay(80);
6426
6427         tw32(MAC_LED_CTRL, tp->led_ctrl);
6428
6429         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
6430         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6431                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6432                 udelay(10);
6433         }
6434         tw32_f(MAC_RX_MODE, tp->rx_mode);
6435         udelay(10);
6436
6437         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6438                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6439                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6440                         /* Set drive transmission level to 1.2V  */
6441                         /* only if the signal pre-emphasis bit is not set  */
6442                         val = tr32(MAC_SERDES_CFG);
6443                         val &= 0xfffff000;
6444                         val |= 0x880;
6445                         tw32(MAC_SERDES_CFG, val);
6446                 }
6447                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6448                         tw32(MAC_SERDES_CFG, 0x616000);
6449         }
6450
6451         /* Prevent chip from dropping frames when flow control
6452          * is enabled.
6453          */
6454         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6455
6456         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6457             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6458                 /* Use hardware link auto-negotiation */
6459                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6460         }
6461
6462         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6463             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6464                 u32 tmp;
6465
6466                 tmp = tr32(SERDES_RX_CTRL);
6467                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6468                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6469                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6470                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6471         }
6472
6473         err = tg3_setup_phy(tp, reset_phy);
6474         if (err)
6475                 return err;
6476
6477         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6478                 u32 tmp;
6479
6480                 /* Clear CRC stats. */
6481                 if (!tg3_readphy(tp, 0x1e, &tmp)) {
6482                         tg3_writephy(tp, 0x1e, tmp | 0x8000);
6483                         tg3_readphy(tp, 0x14, &tmp);
6484                 }
6485         }
6486
6487         __tg3_set_rx_mode(tp->dev);
6488
6489         /* Initialize receive rules. */
6490         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
6491         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6492         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
6493         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6494
6495         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6496             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6497                 limit = 8;
6498         else
6499                 limit = 16;
6500         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6501                 limit -= 4;
6502         switch (limit) {
6503         case 16:
6504                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
6505         case 15:
6506                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
6507         case 14:
6508                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
6509         case 13:
6510                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
6511         case 12:
6512                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
6513         case 11:
6514                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
6515         case 10:
6516                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
6517         case 9:
6518                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
6519         case 8:
6520                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
6521         case 7:
6522                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
6523         case 6:
6524                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
6525         case 5:
6526                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
6527         case 4:
6528                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
6529         case 3:
6530                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
6531         case 2:
6532         case 1:
6533
6534         default:
6535                 break;
6536         };
6537
6538         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6539
6540         return 0;
6541 }
6542
6543 /* Called at device open time to get the chip ready for
6544  * packet processing.  Invoked with tp->lock held.
6545  */
6546 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
6547 {
6548         int err;
6549
6550         /* Force the chip into D0. */
6551         err = tg3_set_power_state(tp, PCI_D0);
6552         if (err)
6553                 goto out;
6554
6555         tg3_switch_clocks(tp);
6556
6557         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6558
6559         err = tg3_reset_hw(tp, reset_phy);
6560
6561 out:
6562         return err;
6563 }
6564
/* Accumulate the 32-bit statistics register REG into the 64-bit
 * software counter PSTAT, carrying into ->high when ->low wraps.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {    u32 __val = tr32(REG); \
        (PSTAT)->low += __val; \
        if ((PSTAT)->low < __val) \
                (PSTAT)->high += 1; \
} while (0)
6571
/* Fold the chip's 32-bit MAC TX/RX and receive-list-placement
 * statistics registers into the 64-bit counters in tp->hw_stats.
 * Called once per second from tg3_timer() with tp->lock held.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	/* Skip collection entirely while the carrier is off. */
	if (!netif_carrier_ok(tp->dev))
		return;

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
6612
/* Driver watchdog, re-armed every tp->timer_offset jiffies.  Works
 * around the non-tagged-status interrupt race, performs per-second
 * statistics/link polling, sends the ASF heartbeat every two seconds,
 * and detects a stalled write-DMA engine.
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	/* An IRQ quiesce is in progress; leave the chip alone and just
	 * re-arm the timer for the next tick.
	 */
	if (tp->irq_sync)
		goto restart_timer;

	spin_lock(&tp->lock);

	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->hw_status->status & SD_STATUS_UPDATED) {
			/* Status block updated: force an interrupt so the
			 * handler runs even if the original one was lost.
			 */
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			/* Kick host coalescing to DMA a status block now. */
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
		}

		/* Write-DMA engine lost its enable bit: the chip is wedged.
		 * Schedule a full reset; reset_task restarts the timer.
		 */
		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
			spin_unlock(&tp->lock);
			schedule_work(&tp->reset_task);
			return;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
			tg3_periodic_fetch_stats(tp);

		if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			/* Detect a PHY event via either the MI interrupt
			 * bit or the link-state-changed bit, depending on
			 * how this board signals it.
			 */
			phy_event = 0;
			if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			/* Link was up but the state changed underneath us. */
			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			/* Link was down but PCS sync / signal detect says
			 * it may have come back.
			 */
			if (! netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					/* Toggle the port-mode bits off and
					 * back on to restart the SERDES.
					 */
					tw32_f(MAC_MODE,
					     (tp->mac_mode &
					      ~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_serdes_parallel_detect(tp);

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.  */
	if (!--tp->asf_counter) {
		if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
			u32 val;

			/* Tell the ASF firmware the driver is still alive. */
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE2);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			/* 5 seconds timeout */
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
			/* NOTE(review): bit 14 appears to notify the RX CPU
			 * of the pending firmware command — confirm against
			 * the GRC_RX_CPU_EVENT register definition.
			 */
			val = tr32(GRC_RX_CPU_EVENT);
			val |= (1 << 14);
			tw32(GRC_RX_CPU_EVENT, val);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
6716
6717 static int tg3_request_irq(struct tg3 *tp)
6718 {
6719         irqreturn_t (*fn)(int, void *, struct pt_regs *);
6720         unsigned long flags;
6721         struct net_device *dev = tp->dev;
6722
6723         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6724                 fn = tg3_msi;
6725                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
6726                         fn = tg3_msi_1shot;
6727                 flags = IRQF_SAMPLE_RANDOM;
6728         } else {
6729                 fn = tg3_interrupt;
6730                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6731                         fn = tg3_interrupt_tagged;
6732                 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
6733         }
6734         return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
6735 }
6736
/* Check that the chip can actually deliver an interrupt to the host:
 * temporarily install the minimal tg3_test_isr, force an immediate
 * host-coalescing cycle, and poll the interrupt mailbox (up to ~50ms)
 * for a non-zero value.  The normal handler is restored before
 * returning.  Returns 0 if the interrupt arrived, -EIO if it did not,
 * -ENODEV if the device is down, or a request_irq() error.
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err, i;
	u32 int_mbox = 0;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	/* Swap the production handler for the test ISR. */
	free_irq(tp->pdev->irq, dev);

	err = request_irq(tp->pdev->irq, tg3_test_isr,
			  IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
	if (err)
		return err;

	tp->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	/* Force an immediate coalescing cycle to raise an interrupt. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       HOSTCC_MODE_NOW);

	/* Poll up to 5 x 10ms for the interrupt mailbox to go non-zero. */
	for (i = 0; i < 5; i++) {
		int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
					TG3_64BIT_REG_LOW);
		if (int_mbox != 0)
			break;
		msleep(10);
	}

	tg3_disable_ints(tp);

	/* Restore the normal interrupt handler. */
	free_irq(tp->pdev->irq, dev);

	err = tg3_request_irq(tp);

	if (err)
		return err;

	if (int_mbox != 0)
		return 0;

	return -EIO;
}
6783
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored.
 *
 * Some platform chipsets mishandle MSI writes, so delivery is proven
 * with a test interrupt before relying on it.  On failure we fall
 * back to legacy INTx and reset the chip, since the failed MSI cycle
 * may have terminated with a PCI Master Abort.
 */
static int tg3_test_msi(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err;
	u16 pci_cmd;

	/* Nothing to verify unless MSI is in use. */
	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the original PCI command word (SERR state). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
	       "switching to INTx mode. Please report this failure to "
	       "the PCI maintainer and include system chipset information.\n",
		       tp->dev->name);

	free_irq(tp->pdev->irq, dev);
	pci_disable_msi(tp->pdev);

	tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;

	err = tg3_request_irq(tp);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	/* If re-init failed the IRQ is useless; release it. */
	if (err)
		free_irq(tp->pdev->irq, dev);

	return err;
}
6844
6845 static int tg3_open(struct net_device *dev)
6846 {
6847         struct tg3 *tp = netdev_priv(dev);
6848         int err;
6849
6850         tg3_full_lock(tp, 0);
6851
6852         err = tg3_set_power_state(tp, PCI_D0);
6853         if (err)
6854                 return err;
6855
6856         tg3_disable_ints(tp);
6857         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
6858
6859         tg3_full_unlock(tp);
6860
6861         /* The placement of this call is tied
6862          * to the setup and use of Host TX descriptors.
6863          */
6864         err = tg3_alloc_consistent(tp);
6865         if (err)
6866                 return err;
6867
6868         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
6869             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
6870             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX) &&
6871             !((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) &&
6872               (tp->pdev_peer == tp->pdev))) {
6873                 /* All MSI supporting chips should support tagged
6874                  * status.  Assert that this is the case.
6875                  */
6876                 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6877                         printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
6878                                "Not using MSI.\n", tp->dev->name);
6879                 } else if (pci_enable_msi(tp->pdev) == 0) {
6880                         u32 msi_mode;
6881
6882                         msi_mode = tr32(MSGINT_MODE);
6883                         tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
6884                         tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
6885                 }
6886         }
6887         err = tg3_request_irq(tp);
6888
6889         if (err) {
6890                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6891                         pci_disable_msi(tp->pdev);
6892                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6893                 }
6894                 tg3_free_consistent(tp);
6895                 return err;
6896         }
6897
6898         tg3_full_lock(tp, 0);
6899
6900         err = tg3_init_hw(tp, 1);
6901         if (err) {
6902                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6903                 tg3_free_rings(tp);
6904         } else {
6905                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6906                         tp->timer_offset = HZ;
6907                 else
6908                         tp->timer_offset = HZ / 10;
6909
6910                 BUG_ON(tp->timer_offset > HZ);
6911                 tp->timer_counter = tp->timer_multiplier =
6912                         (HZ / tp->timer_offset);
6913                 tp->asf_counter = tp->asf_multiplier =
6914                         ((HZ / tp->timer_offset) * 2);
6915
6916                 init_timer(&tp->timer);
6917                 tp->timer.expires = jiffies + tp->timer_offset;
6918                 tp->timer.data = (unsigned long) tp;
6919                 tp->timer.function = tg3_timer;
6920         }
6921
6922         tg3_full_unlock(tp);
6923
6924         if (err) {
6925                 free_irq(tp->pdev->irq, dev);
6926                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6927                         pci_disable_msi(tp->pdev);
6928                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6929                 }
6930                 tg3_free_consistent(tp);
6931                 return err;
6932         }
6933
6934         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6935                 err = tg3_test_msi(tp);
6936
6937                 if (err) {
6938                         tg3_full_lock(tp, 0);
6939
6940                         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6941                                 pci_disable_msi(tp->pdev);
6942                                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6943                         }
6944                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6945                         tg3_free_rings(tp);
6946                         tg3_free_consistent(tp);
6947
6948                         tg3_full_unlock(tp);
6949
6950                         return err;
6951                 }
6952
6953                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6954                         if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
6955                                 u32 val = tr32(0x7c04);
6956
6957                                 tw32(0x7c04, val | (1 << 29));
6958                         }
6959                 }
6960         }
6961
6962         tg3_full_lock(tp, 0);
6963
6964         add_timer(&tp->timer);
6965         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
6966         tg3_enable_ints(tp);
6967
6968         tg3_full_unlock(tp);
6969
6970         netif_start_queue(dev);
6971
6972         return 0;
6973 }
6974
6975 #if 0
6976 /*static*/ void tg3_dump_state(struct tg3 *tp)
6977 {
6978         u32 val32, val32_2, val32_3, val32_4, val32_5;
6979         u16 val16;
6980         int i;
6981
6982         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
6983         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
6984         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
6985                val16, val32);
6986
6987         /* MAC block */
6988         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
6989                tr32(MAC_MODE), tr32(MAC_STATUS));
6990         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
6991                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
6992         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
6993                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
6994         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
6995                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
6996
6997         /* Send data initiator control block */
6998         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
6999                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
7000         printk("       SNDDATAI_STATSCTRL[%08x]\n",
7001                tr32(SNDDATAI_STATSCTRL));
7002
7003         /* Send data completion control block */
7004         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
7005
7006         /* Send BD ring selector block */
7007         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
7008                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
7009
7010         /* Send BD initiator control block */
7011         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
7012                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
7013
7014         /* Send BD completion control block */
7015         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
7016
7017         /* Receive list placement control block */
7018         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
7019                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
7020         printk("       RCVLPC_STATSCTRL[%08x]\n",
7021                tr32(RCVLPC_STATSCTRL));
7022
7023         /* Receive data and receive BD initiator control block */
7024         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
7025                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
7026
7027         /* Receive data completion control block */
7028         printk("DEBUG: RCVDCC_MODE[%08x]\n",
7029                tr32(RCVDCC_MODE));
7030
7031         /* Receive BD initiator control block */
7032         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
7033                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
7034
7035         /* Receive BD completion control block */
7036         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
7037                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
7038
7039         /* Receive list selector control block */
7040         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
7041                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
7042
7043         /* Mbuf cluster free block */
7044         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
7045                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
7046
7047         /* Host coalescing control block */
7048         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
7049                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
7050         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
7051                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7052                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7053         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
7054                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7055                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7056         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
7057                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
7058         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
7059                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
7060
7061         /* Memory arbiter control block */
7062         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
7063                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
7064
7065         /* Buffer manager control block */
7066         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
7067                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
7068         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
7069                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
7070         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
7071                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
7072                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
7073                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
7074
7075         /* Read DMA control block */
7076         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
7077                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
7078
7079         /* Write DMA control block */
7080         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
7081                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
7082
7083         /* DMA completion block */
7084         printk("DEBUG: DMAC_MODE[%08x]\n",
7085                tr32(DMAC_MODE));
7086
7087         /* GRC block */
7088         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
7089                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
7090         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
7091                tr32(GRC_LOCAL_CTRL));
7092
7093         /* TG3_BDINFOs */
7094         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
7095                tr32(RCVDBDI_JUMBO_BD + 0x0),
7096                tr32(RCVDBDI_JUMBO_BD + 0x4),
7097                tr32(RCVDBDI_JUMBO_BD + 0x8),
7098                tr32(RCVDBDI_JUMBO_BD + 0xc));
7099         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
7100                tr32(RCVDBDI_STD_BD + 0x0),
7101                tr32(RCVDBDI_STD_BD + 0x4),
7102                tr32(RCVDBDI_STD_BD + 0x8),
7103                tr32(RCVDBDI_STD_BD + 0xc));
7104         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
7105                tr32(RCVDBDI_MINI_BD + 0x0),
7106                tr32(RCVDBDI_MINI_BD + 0x4),
7107                tr32(RCVDBDI_MINI_BD + 0x8),
7108                tr32(RCVDBDI_MINI_BD + 0xc));
7109
7110         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
7111         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
7112         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
7113         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
7114         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
7115                val32, val32_2, val32_3, val32_4);
7116
7117         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
7118         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
7119         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
7120         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
7121         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
7122                val32, val32_2, val32_3, val32_4);
7123
7124         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
7125         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
7126         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
7127         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
7128         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
7129         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
7130                val32, val32_2, val32_3, val32_4, val32_5);
7131
7132         /* SW status block */
7133         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
7134                tp->hw_status->status,
7135                tp->hw_status->status_tag,
7136                tp->hw_status->rx_jumbo_consumer,
7137                tp->hw_status->rx_consumer,
7138                tp->hw_status->rx_mini_consumer,
7139                tp->hw_status->idx[0].rx_producer,
7140                tp->hw_status->idx[0].tx_consumer);
7141
7142         /* SW statistics block */
7143         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
7144                ((u32 *)tp->hw_stats)[0],
7145                ((u32 *)tp->hw_stats)[1],
7146                ((u32 *)tp->hw_stats)[2],
7147                ((u32 *)tp->hw_stats)[3]);
7148
7149         /* Mailboxes */
7150         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
7151                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
7152                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
7153                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
7154                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
7155
7156         /* NIC side send descriptors. */
7157         for (i = 0; i < 6; i++) {
7158                 unsigned long txd;
7159
7160                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
7161                         + (i * sizeof(struct tg3_tx_buffer_desc));
7162                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
7163                        i,
7164                        readl(txd + 0x0), readl(txd + 0x4),
7165                        readl(txd + 0x8), readl(txd + 0xc));
7166         }
7167
7168         /* NIC side RX descriptors. */
7169         for (i = 0; i < 6; i++) {
7170                 unsigned long rxd;
7171
7172                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
7173                         + (i * sizeof(struct tg3_rx_buffer_desc));
7174                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
7175                        i,
7176                        readl(rxd + 0x0), readl(rxd + 0x4),
7177                        readl(rxd + 0x8), readl(rxd + 0xc));
7178                 rxd += (4 * sizeof(u32));
7179                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
7180                        i,
7181                        readl(rxd + 0x0), readl(rxd + 0x4),
7182                        readl(rxd + 0x8), readl(rxd + 0xc));
7183         }
7184
7185         for (i = 0; i < 6; i++) {
7186                 unsigned long rxd;
7187
7188                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
7189                         + (i * sizeof(struct tg3_rx_buffer_desc));
7190                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
7191                        i,
7192                        readl(rxd + 0x0), readl(rxd + 0x4),
7193                        readl(rxd + 0x8), readl(rxd + 0xc));
7194                 rxd += (4 * sizeof(u32));
7195                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
7196                        i,
7197                        readl(rxd + 0x0), readl(rxd + 0x4),
7198                        readl(rxd + 0x8), readl(rxd + 0xc));
7199         }
7200 }
7201 #endif
7202
7203 static struct net_device_stats *tg3_get_stats(struct net_device *);
7204 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7205
/* netdev ->stop() handler: quiesce and power down the chip.
 *
 * Waits out any in-flight reset task, stops the TX queue and timer,
 * halts the hardware, releases the IRQ/MSI, snapshots the statistics
 * (the counters become unreachable once the DMA area is freed), frees
 * all consistent DMA resources and finally drops the chip into D3hot.
 */
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (tp->tg3_flags & TG3_FLAG_IN_RESET_TASK)
		msleep(1);

	netif_stop_queue(dev);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
#if 0
	tg3_dump_state(tp);
#endif

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tp->tg3_flags &=
		~(TG3_FLAG_INIT_COMPLETE |
		  TG3_FLAG_GOT_SERDES_FLOWCTL);

	tg3_full_unlock(tp);

	free_irq(tp->pdev->irq, dev);
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		pci_disable_msi(tp->pdev);
		tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
	}

	/* Save the running totals so stats queries keep working after
	 * the hw stats block is freed below.
	 */
	memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
	       sizeof(tp->net_stats_prev));
	memcpy(&tp->estats_prev, tg3_get_estats(tp),
	       sizeof(tp->estats_prev));

	tg3_free_consistent(tp);

	tg3_set_power_state(tp, PCI_D3hot);

	netif_carrier_off(tp->dev);

	return 0;
}
7255
/* Fold a split 64-bit hardware counter into an unsigned long.
 * 32-bit hosts keep only the low word; 64-bit hosts reassemble the
 * full high:low value.
 */
static inline unsigned long get_stat64(tg3_stat64_t *val)
{
#if (BITS_PER_LONG == 32)
	return val->low;
#else
	return ((u64)val->high << 32) | ((u64)val->low);
#endif
}
7267
/* Return the cumulative RX CRC error count.
 *
 * Copper 5700/5701 parts read the count from a PHY register and
 * accumulate it in tp->phy_crc_errors; all other chips report the
 * statistics block's rx_fcs_errors counter instead.
 */
static unsigned long calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		spin_lock_bh(&tp->lock);
		if (!tg3_readphy(tp, 0x1e, &val)) {
			/* NOTE(review): 0x1e / bit 0x8000 / 0x14 look like a
			 * shadow-register enable plus counter register from
			 * the PHY datasheet -- confirm before touching.
			 */
			tg3_writephy(tp, 0x1e, val | 0x8000);
			tg3_readphy(tp, 0x14, &val);
		} else
			val = 0;
		spin_unlock_bh(&tp->lock);

		/* Counter is accumulated in software across reads. */
		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
7292
/* Add the live hardware counter for @member to the total saved at the
 * last close (old_estats), storing the sum in the ethtool snapshot.
 */
#define ESTAT_ADD(member) \
	estats->member =	old_estats->member + \
				get_stat64(&hw_stats->member)

/* Fill tp->estats for ethtool -S: every entry is the DMA'd hardware
 * statistics block value plus the running total in tp->estats_prev.
 * When the hw stats block is not mapped (device closed), the saved
 * totals are returned unchanged.
 */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
	struct tg3_ethtool_stats *estats = &tp->estats;
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_estats;

	/* RX counters. */
	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	/* TX counters. */
	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	/* DMA write / RX ring counters. */
	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	/* DMA read / TX completion counters. */
	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	/* Interrupt and ring-update counters. */
	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	return estats;
}
7384
/* ndo get_stats: translate the chip statistics block into struct
 * net_device_stats.  Each field is the live hardware counter plus the
 * total saved in tp->net_stats_prev at the last close; when the hw
 * stats block is unmapped, the saved totals are returned as-is.
 */
static struct net_device_stats *tg3_get_stats(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	struct net_device_stats *stats = &tp->net_stats;
	struct net_device_stats *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_stats;

	/* Hardware counts ucast/mcast/bcast separately; sum them. */
	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	/* TX errors aggregate several distinct hardware causes. */
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* CRC errors may come from a PHY register on 5700/5701. */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	return stats;
}
7444
/* Compute the standard reflected CRC-32 (polynomial 0xedb88320,
 * initial value all-ones, final inversion) over @len bytes of @buf,
 * one bit at a time.  Used to derive the multicast hash filter bits.
 */
static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 crc = ~(u32) 0;
	int i, bit;

	for (i = 0; i < len; i++) {
		crc ^= buf[i];

		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
	}

	return ~crc;
}
7469
7470 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7471 {
7472         /* accept or reject all multicast frames */
7473         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7474         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7475         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7476         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
7477 }
7478
/* Program MAC RX filtering for the current netdev flags and multicast
 * list.  Called with the full lock held (see tg3_set_rx_mode).
 *
 * Rebuilds the promisc / VLAN-tag-keep bits of rx_mode, loads the
 * multicast hash filter, and only writes MAC_RX_MODE when the mode
 * word actually changed.
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
#if TG3_VLAN_TAG_USED
	if (!tp->vlgrp &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#else
	/* By definition, VLAN is disabled always in this
	 * case.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi (tp, 1);
	} else if (dev->mc_count < 1) {
		/* Reject all multicast. */
		tg3_set_multi (tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		unsigned int i;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* Hash bit = low 7 bits of the inverted CRC:
			 * bits 6:5 select the register, 4:0 the bit.
			 */
			crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
7542
/* ndo set_rx_mode entry point: apply the RX filtering configuration
 * under the full lock.  A stopped interface is left untouched.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
7554
7555 #define TG3_REGDUMP_LEN         (32 * 1024)
7556
7557 static int tg3_get_regs_len(struct net_device *dev)
7558 {
7559         return TG3_REGDUMP_LEN;
7560 }
7561
/* ethtool get_regs: dump TG3_REGDUMP_LEN bytes of chip registers
 * into @_p.
 *
 * Each register lands at its own hardware offset within the output
 * buffer (the GET_REG32_* helpers re-seat p at orig_p + base); the
 * initial memset leaves the gaps zeroed.  Nothing is read while the
 * PHY is in the low-power state.
 */
static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p;
	struct tg3 *tp = netdev_priv(dev);
	u8 *orig_p = _p;
	int i;

	regs->version = 0;

	memset(p, 0, TG3_REGDUMP_LEN);

	if (tp->link_config.phy_is_low_power)
		return;

	tg3_full_lock(tp, 0);

/* Store one register and advance the output cursor. */
#define __GET_REG32(reg)	(*(p)++ = tr32(reg))
/* Dump a contiguous range of registers at its mirrored offset. */
#define GET_REG32_LOOP(base,len)		\
do {	p = (u32 *)(orig_p + (base));		\
	for (i = 0; i < len; i += 4)		\
		__GET_REG32((base) + i);	\
} while (0)
/* Dump a single register at its mirrored offset. */
#define GET_REG32_1(reg)			\
do {	p = (u32 *)(orig_p + (reg));		\
	__GET_REG32((reg));			\
} while (0)

	GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
	GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
	GET_REG32_LOOP(MAC_MODE, 0x4f0);
	GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
	GET_REG32_1(SNDDATAC_MODE);
	GET_REG32_LOOP(SNDBDS_MODE, 0x80);
	GET_REG32_LOOP(SNDBDI_MODE, 0x48);
	GET_REG32_1(SNDBDC_MODE);
	GET_REG32_LOOP(RCVLPC_MODE, 0x20);
	GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
	GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
	GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
	GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
	GET_REG32_1(RCVDCC_MODE);
	GET_REG32_LOOP(RCVBDI_MODE, 0x20);
	GET_REG32_LOOP(RCVCC_MODE, 0x14);
	GET_REG32_LOOP(RCVLSC_MODE, 0x08);
	GET_REG32_1(MBFREE_MODE);
	GET_REG32_LOOP(HOSTCC_MODE, 0x100);
	GET_REG32_LOOP(MEMARB_MODE, 0x10);
	GET_REG32_LOOP(BUFMGR_MODE, 0x58);
	GET_REG32_LOOP(RDMAC_MODE, 0x08);
	GET_REG32_LOOP(WDMAC_MODE, 0x08);
	GET_REG32_1(RX_CPU_MODE);
	GET_REG32_1(RX_CPU_STATE);
	GET_REG32_1(RX_CPU_PGMCTR);
	GET_REG32_1(RX_CPU_HWBKPT);
	GET_REG32_1(TX_CPU_MODE);
	GET_REG32_1(TX_CPU_STATE);
	GET_REG32_1(TX_CPU_PGMCTR);
	GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
	GET_REG32_LOOP(FTQ_RESET, 0x120);
	GET_REG32_LOOP(MSGINT_MODE, 0x0c);
	GET_REG32_1(DMAC_MODE);
	GET_REG32_LOOP(GRC_MODE, 0x4c);
	/* NVRAM registers are only meaningful when NVRAM is present. */
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		GET_REG32_LOOP(NVRAM_CMD, 0x24);

#undef __GET_REG32
#undef GET_REG32_LOOP
#undef GET_REG32_1

	tg3_full_unlock(tp);
}
7634
7635 static int tg3_get_eeprom_len(struct net_device *dev)
7636 {
7637         struct tg3 *tp = netdev_priv(dev);
7638
7639         return tp->nvram_size;
7640 }
7641
7642 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
7643 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
7644
/* ethtool get_eeprom: copy @eeprom->len bytes of NVRAM starting at
 * @eeprom->offset into @data.
 *
 * NVRAM is read one 32-bit word at a time, so an unaligned head and a
 * ragged tail are handled by reading the containing word and copying
 * out only the requested bytes.  eeprom->len tracks how many bytes
 * were actually delivered, even when a mid-transfer read fails.
 *
 * Returns 0 on success or the tg3_nvram_read() error code.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, val, b_offset, b_count;

	/* Refuse while in low-power state (NVRAM presumably
	 * inaccessible then -- caller retries via -EAGAIN).
	 */
	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		/* Put the word in LE byte order before byte-wise copy. */
		val = cpu_to_le32(val);
		memcpy(data, ((char*)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes upto the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read(tp, offset + i, &val);
		if (ret) {
			eeprom->len += i;
			return ret;
		}
		val = cpu_to_le32(val);
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read(tp, b_offset, &val);
		if (ret)
			return ret;
		val = cpu_to_le32(val);
		memcpy(pd, ((char*)&val), b_count);
		eeprom->len += b_count;
	}
	return 0;
}
7706
7707 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
7708
7709 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7710 {
7711         struct tg3 *tp = netdev_priv(dev);
7712         int ret;
7713         u32 offset, len, b_offset, odd_len, start, end;
7714         u8 *buf;
7715
7716         if (tp->link_config.phy_is_low_power)
7717                 return -EAGAIN;
7718
7719         if (eeprom->magic != TG3_EEPROM_MAGIC)
7720                 return -EINVAL;
7721
7722         offset = eeprom->offset;
7723         len = eeprom->len;
7724
7725         if ((b_offset = (offset & 3))) {
7726                 /* adjustments to start on required 4 byte boundary */
7727                 ret = tg3_nvram_read(tp, offset-b_offset, &start);
7728                 if (ret)
7729                         return ret;
7730                 start = cpu_to_le32(start);
7731                 len += b_offset;
7732                 offset &= ~3;
7733                 if (len < 4)
7734                         len = 4;
7735         }
7736
7737         odd_len = 0;
7738         if (len & 3) {
7739                 /* adjustments to end on required 4 byte boundary */
7740                 odd_len = 1;
7741                 len = (len + 3) & ~3;
7742                 ret = tg3_nvram_read(tp, offset+len-4, &end);
7743                 if (ret)
7744                         return ret;
7745                 end = cpu_to_le32(end);
7746         }
7747
7748         buf = data;
7749         if (b_offset || odd_len) {
7750                 buf = kmalloc(len, GFP_KERNEL);
7751                 if (buf == 0)
7752                         return -ENOMEM;
7753                 if (b_offset)
7754                         memcpy(buf, &start, 4);
7755                 if (odd_len)
7756                         memcpy(buf+len-4, &end, 4);
7757                 memcpy(buf + b_offset, data, eeprom->len);
7758         }
7759
7760         ret = tg3_nvram_write_block(tp, offset, len, buf);
7761
7762         if (buf != data)
7763                 kfree(buf);
7764
7765         return ret;
7766 }
7767
7768 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7769 {
7770         struct tg3 *tp = netdev_priv(dev);
7771
7772         cmd->supported = (SUPPORTED_Autoneg);
7773
7774         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7775                 cmd->supported |= (SUPPORTED_1000baseT_Half |
7776                                    SUPPORTED_1000baseT_Full);
7777
7778         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
7779                 cmd->supported |= (SUPPORTED_100baseT_Half |
7780                                   SUPPORTED_100baseT_Full |
7781                                   SUPPORTED_10baseT_Half |
7782                                   SUPPORTED_10baseT_Full |
7783                                   SUPPORTED_MII);
7784                 cmd->port = PORT_TP;
7785         } else {
7786                 cmd->supported |= SUPPORTED_FIBRE;
7787                 cmd->port = PORT_FIBRE;
7788         }
7789
7790         cmd->advertising = tp->link_config.advertising;
7791         if (netif_running(dev)) {
7792                 cmd->speed = tp->link_config.active_speed;
7793                 cmd->duplex = tp->link_config.active_duplex;
7794         }
7795         cmd->phy_address = PHY_ADDR;
7796         cmd->transceiver = 0;
7797         cmd->autoneg = tp->link_config.autoneg;
7798         cmd->maxtxpkt = 0;
7799         cmd->maxrxpkt = 0;
7800         return 0;
7801 }
7802
7803 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7804 {
7805         struct tg3 *tp = netdev_priv(dev);
7806
7807         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
7808                 /* These are the only valid advertisement bits allowed.  */
7809                 if (cmd->autoneg == AUTONEG_ENABLE &&
7810                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7811                                           ADVERTISED_1000baseT_Full |
7812                                           ADVERTISED_Autoneg |
7813                                           ADVERTISED_FIBRE)))
7814                         return -EINVAL;
7815                 /* Fiber can only do SPEED_1000.  */
7816                 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7817                          (cmd->speed != SPEED_1000))
7818                         return -EINVAL;
7819         /* Copper cannot force SPEED_1000.  */
7820         } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7821                    (cmd->speed == SPEED_1000))
7822                 return -EINVAL;
7823         else if ((cmd->speed == SPEED_1000) &&
7824                  (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY))
7825                 return -EINVAL;
7826
7827         tg3_full_lock(tp, 0);
7828
7829         tp->link_config.autoneg = cmd->autoneg;
7830         if (cmd->autoneg == AUTONEG_ENABLE) {
7831                 tp->link_config.advertising = cmd->advertising;
7832                 tp->link_config.speed = SPEED_INVALID;
7833                 tp->link_config.duplex = DUPLEX_INVALID;
7834         } else {
7835                 tp->link_config.advertising = 0;
7836                 tp->link_config.speed = cmd->speed;
7837                 tp->link_config.duplex = cmd->duplex;
7838         }
7839
7840         if (netif_running(dev))
7841                 tg3_setup_phy(tp, 1);
7842
7843         tg3_full_unlock(tp);
7844
7845         return 0;
7846 }
7847
7848 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7849 {
7850         struct tg3 *tp = netdev_priv(dev);
7851
7852         strcpy(info->driver, DRV_MODULE_NAME);
7853         strcpy(info->version, DRV_MODULE_VERSION);
7854         strcpy(info->fw_version, tp->fw_ver);
7855         strcpy(info->bus_info, pci_name(tp->pdev));
7856 }
7857
7858 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7859 {
7860         struct tg3 *tp = netdev_priv(dev);
7861
7862         wol->supported = WAKE_MAGIC;
7863         wol->wolopts = 0;
7864         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7865                 wol->wolopts = WAKE_MAGIC;
7866         memset(&wol->sopass, 0, sizeof(wol->sopass));
7867 }
7868
7869 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7870 {
7871         struct tg3 *tp = netdev_priv(dev);
7872
7873         if (wol->wolopts & ~WAKE_MAGIC)
7874                 return -EINVAL;
7875         if ((wol->wolopts & WAKE_MAGIC) &&
7876             tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
7877             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
7878                 return -EINVAL;
7879
7880         spin_lock_bh(&tp->lock);
7881         if (wol->wolopts & WAKE_MAGIC)
7882                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
7883         else
7884                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
7885         spin_unlock_bh(&tp->lock);
7886
7887         return 0;
7888 }
7889
7890 static u32 tg3_get_msglevel(struct net_device *dev)
7891 {
7892         struct tg3 *tp = netdev_priv(dev);
7893         return tp->msg_enable;
7894 }
7895
7896 static void tg3_set_msglevel(struct net_device *dev, u32 value)
7897 {
7898         struct tg3 *tp = netdev_priv(dev);
7899         tp->msg_enable = value;
7900 }
7901
#if TG3_TSO_SUPPORT != 0
/* ethtool set_tso: enable/disable TCP segmentation offload.
 * Non-TSO-capable chips only accept "off"; HW_TSO_2 chips toggle
 * the TSO6 feature flag alongside the generic TSO setting.
 */
static int tg3_set_tso(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE))
		return value ? -EINVAL : 0;

	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) {
		if (value)
			dev->features |= NETIF_F_TSO6;
		else
			dev->features &= ~NETIF_F_TSO6;
	}
	return ethtool_op_set_tso(dev, value);
}
#endif
7921
/* ethtool nway_reset: restart autonegotiation on the copper PHY.
 *
 * Returns -EAGAIN if the interface is down, -EINVAL on serdes
 * devices or when autoneg is not enabled (unless parallel detect
 * is active), 0 on success.
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 bmcr;
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		return -EINVAL;

	spin_lock_bh(&tp->lock);
	r = -EINVAL;
	/* NOTE(review): BMCR is read twice; the first read looks like a
	 * deliberate dummy read (e.g. to clear latched bits) -- confirm
	 * against the PHY datasheet before removing.
	 */
	tg3_readphy(tp, MII_BMCR, &bmcr);
	if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
	    ((bmcr & BMCR_ANENABLE) ||
	     (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
		tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
					   BMCR_ANENABLE);
		r = 0;
	}
	spin_unlock_bh(&tp->lock);

	return r;
}
7948
7949 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7950 {
7951         struct tg3 *tp = netdev_priv(dev);
7952
7953         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
7954         ering->rx_mini_max_pending = 0;
7955         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7956                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
7957         else
7958                 ering->rx_jumbo_max_pending = 0;
7959
7960         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
7961
7962         ering->rx_pending = tp->rx_pending;
7963         ering->rx_mini_pending = 0;
7964         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7965                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
7966         else
7967                 ering->rx_jumbo_pending = 0;
7968
7969         ering->tx_pending = tp->tx_pending;
7970 }
7971
/* ethtool set_ringparam hook: resize the RX/TX rings.
 *
 * If the interface is running, the chip is halted and reinitialized
 * with the new sizes; on restart failure the device is left stopped
 * and the error is returned to the caller.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int irq_sync = 0, err = 0;

	/* Reject requests exceeding the fixed hardware ring sizes. */
	if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
	    (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1))
		return -EINVAL;

	if (netif_running(dev)) {
		/* Quiesce the data path and synchronize with the IRQ
		 * handler before reconfiguring.
		 */
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* Some chips cap the standard RX ring at 64 descriptors. */
	if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
	tp->tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	return err;
}
8008
8009 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8010 {
8011         struct tg3 *tp = netdev_priv(dev);
8012
8013         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
8014         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
8015         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
8016 }
8017
/* ethtool set_pauseparam hook: update the flow-control flags and,
 * if the interface is running, halt and reinitialize the chip so the
 * new settings take effect. On restart failure the device is left
 * stopped and the error is returned.
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int irq_sync = 0, err = 0;

	if (netif_running(dev)) {
		/* Quiesce the data path before touching the config. */
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	if (epause->autoneg)
		tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
	else
		tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
	if (epause->rx_pause)
		tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
	else
		tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
	if (epause->tx_pause)
		tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
	else
		tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	return err;
}
8054
8055 static u32 tg3_get_rx_csum(struct net_device *dev)
8056 {
8057         struct tg3 *tp = netdev_priv(dev);
8058         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
8059 }
8060
8061 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
8062 {
8063         struct tg3 *tp = netdev_priv(dev);
8064
8065         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8066                 if (data != 0)
8067                         return -EINVAL;
8068                 return 0;
8069         }
8070
8071         spin_lock_bh(&tp->lock);
8072         if (data)
8073                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8074         else
8075                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8076         spin_unlock_bh(&tp->lock);
8077
8078         return 0;
8079 }
8080
8081 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
8082 {
8083         struct tg3 *tp = netdev_priv(dev);
8084
8085         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8086                 if (data != 0)
8087                         return -EINVAL;
8088                 return 0;
8089         }
8090
8091         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8092             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8093                 ethtool_op_set_tx_hw_csum(dev, data);
8094         else
8095                 ethtool_op_set_tx_csum(dev, data);
8096
8097         return 0;
8098 }
8099
/* ethtool get_stats_count hook: number of u64 statistics exported
 * by tg3_get_ethtool_stats.
 */
static int tg3_get_stats_count (struct net_device *dev)
{
	return TG3_NUM_STATS;
}
8104
/* ethtool self_test_count hook: number of result slots filled in
 * by tg3_self_test.
 */
static int tg3_get_test_count (struct net_device *dev)
{
	return TG3_NUM_TEST;
}
8109
8110 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
8111 {
8112         switch (stringset) {
8113         case ETH_SS_STATS:
8114                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
8115                 break;
8116         case ETH_SS_TEST:
8117                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
8118                 break;
8119         default:
8120                 WARN_ON(1);     /* we need a WARN() */
8121                 break;
8122         }
8123 }
8124
8125 static int tg3_phys_id(struct net_device *dev, u32 data)
8126 {
8127         struct tg3 *tp = netdev_priv(dev);
8128         int i;
8129
8130         if (!netif_running(tp->dev))
8131                 return -EAGAIN;
8132
8133         if (data == 0)
8134                 data = 2;
8135
8136         for (i = 0; i < (data * 2); i++) {
8137                 if ((i % 2) == 0)
8138                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8139                                            LED_CTRL_1000MBPS_ON |
8140                                            LED_CTRL_100MBPS_ON |
8141                                            LED_CTRL_10MBPS_ON |
8142                                            LED_CTRL_TRAFFIC_OVERRIDE |
8143                                            LED_CTRL_TRAFFIC_BLINK |
8144                                            LED_CTRL_TRAFFIC_LED);
8145
8146                 else
8147                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8148                                            LED_CTRL_TRAFFIC_OVERRIDE);
8149
8150                 if (msleep_interruptible(500))
8151                         break;
8152         }
8153         tw32(MAC_LED_CTRL, tp->led_ctrl);
8154         return 0;
8155 }
8156
/* ethtool get_ethtool_stats hook: refresh the driver's estats from
 * hardware via tg3_get_estats and copy them into the caller's buffer
 * (TG3_NUM_STATS u64 entries).
 */
static void tg3_get_ethtool_stats (struct net_device *dev,
				   struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);
	memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
}
8163
8164 #define NVRAM_TEST_SIZE 0x100
8165 #define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14
8166
/* Self-test: validate the NVRAM contents.
 *
 * Two layouts are recognized from the magic word at offset 0:
 *  - standard images (TG3_EEPROM_MAGIC): CRC checks below,
 *  - selfboot images (0xa5 in the top byte): format 1 is verified by
 *    an 8-bit zero-sum over the whole image, other selfboot variants
 *    are accepted without checking.
 * Returns 0 on success, -EIO on read failure or checksum mismatch,
 * -ENOMEM if the bounce buffer cannot be allocated.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 *buf, csum, magic;
	int i, j, err = 0, size;

	if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
		return -EIO;

	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & 0xff000000) == 0xa5000000) {
		if ((magic & 0xe00000) == 0x200000)
			size = NVRAM_SELFBOOT_FORMAT1_SIZE;
		else
			return 0;
	} else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	/* Read the image into memory as little-endian words. */
	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		u32 val;

		if ((err = tg3_nvram_read(tp, i, &val)) != 0)
			break;
		buf[j] = cpu_to_le32(val);
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		/* The 8-bit sum of every byte must be zero. */
		for (i = 0; i < size; i++)
			csum8 += buf8[i];

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if(csum != cpu_to_le32(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != cpu_to_le32(buf[0xfc/4]))
		 goto out;

	err = 0;

out:
	kfree(buf);
	return err;
}
8232
8233 #define TG3_SERDES_TIMEOUT_SEC  2
8234 #define TG3_COPPER_TIMEOUT_SEC  6
8235
8236 static int tg3_test_link(struct tg3 *tp)
8237 {
8238         int i, max;
8239
8240         if (!netif_running(tp->dev))
8241                 return -ENODEV;
8242
8243         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
8244                 max = TG3_SERDES_TIMEOUT_SEC;
8245         else
8246                 max = TG3_COPPER_TIMEOUT_SEC;
8247
8248         for (i = 0; i < max; i++) {
8249                 if (netif_carrier_ok(tp->dev))
8250                         return 0;
8251
8252                 if (msleep_interruptible(1000))
8253                         break;
8254         }
8255
8256         return -EIO;
8257 }
8258
/* Self-test: only test the commonly used registers.
 *
 * For each table entry the read-only bits (read_mask) must survive
 * writes of all-zeros and all-ones, and the read/write bits
 * (write_mask) must take both values. Entries are filtered per chip
 * family via the TG3_FL_* flags. The original register value is
 * restored after each probe (including on failure). Returns 0 on
 * success, -EIO with a console message on the first mismatch.
 * Caller must have the chip halted (see tg3_self_test).
 */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, 0x0000,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, 0x0000,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		/* Table terminator. */
		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		is_5705 = 1;
	else
		is_5705 = 0;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		/* Skip entries that do not apply to this chip family. */
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		tw32(offset, save_val);
	}

	return 0;

out:
	printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
	tw32(offset, save_val);
	return -EIO;
}
8471
8472 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
8473 {
8474         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
8475         int i;
8476         u32 j;
8477
8478         for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) {
8479                 for (j = 0; j < len; j += 4) {
8480                         u32 val;
8481
8482                         tg3_write_mem(tp, offset + j, test_pattern[i]);
8483                         tg3_read_mem(tp, offset + j, &val);
8484                         if (val != test_pattern[i])
8485                                 return -EIO;
8486                 }
8487         }
8488         return 0;
8489 }
8490
8491 static int tg3_test_memory(struct tg3 *tp)
8492 {
8493         static struct mem_entry {
8494                 u32 offset;
8495                 u32 len;
8496         } mem_tbl_570x[] = {
8497                 { 0x00000000, 0x00b50},
8498                 { 0x00002000, 0x1c000},
8499                 { 0xffffffff, 0x00000}
8500         }, mem_tbl_5705[] = {
8501                 { 0x00000100, 0x0000c},
8502                 { 0x00000200, 0x00008},
8503                 { 0x00004000, 0x00800},
8504                 { 0x00006000, 0x01000},
8505                 { 0x00008000, 0x02000},
8506                 { 0x00010000, 0x0e000},
8507                 { 0xffffffff, 0x00000}
8508         }, mem_tbl_5755[] = {
8509                 { 0x00000200, 0x00008},
8510                 { 0x00004000, 0x00800},
8511                 { 0x00006000, 0x00800},
8512                 { 0x00008000, 0x02000},
8513                 { 0x00010000, 0x0c000},
8514                 { 0xffffffff, 0x00000}
8515         };
8516         struct mem_entry *mem_tbl;
8517         int err = 0;
8518         int i;
8519
8520         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
8521                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8522                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8523                         mem_tbl = mem_tbl_5755;
8524                 else
8525                         mem_tbl = mem_tbl_5705;
8526         } else
8527                 mem_tbl = mem_tbl_570x;
8528
8529         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
8530                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
8531                     mem_tbl[i].len)) != 0)
8532                         break;
8533         }
8534
8535         return err;
8536 }
8537
8538 #define TG3_MAC_LOOPBACK        0
8539 #define TG3_PHY_LOOPBACK        1
8540
/* Self-test helper: run a single-packet loopback test in the given
 * mode (TG3_MAC_LOOPBACK or TG3_PHY_LOOPBACK).
 *
 * A 1514-byte frame with a counting byte pattern is queued on the TX
 * ring; the status block is then polled until the frame has been
 * consumed and looped back onto the standard RX ring, and the
 * received payload is compared against what was sent.
 *
 * Returns 0 on success, -EINVAL for an unknown mode, -ENOMEM if the
 * test skb cannot be allocated, -EIO on timeout or data mismatch.
 * Caller must have the chip quiesced (see tg3_self_test).
 */
static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
{
	u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 desc_idx;
	struct sk_buff *skb, *rx_skb;
	u8 *tx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;

	if (loopback_mode == TG3_MAC_LOOPBACK) {
		/* HW errata - mac loopback fails in some cases on 5780.
		 * Normal traffic and PHY loopback are not affected by
		 * errata.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
			return 0;

		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
			   MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
			   MAC_MODE_PORT_MODE_GMII;
		tw32(MAC_MODE, mac_mode);
	} else if (loopback_mode == TG3_PHY_LOOPBACK) {
		/* Force the PHY into 1000/full loopback mode. */
		tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
					   BMCR_SPEED1000);
		udelay(40);
		/* reset to prevent losing 1st rx packet intermittently */
		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
			tw32_f(MAC_RX_MODE, RX_MODE_RESET);
			udelay(10);
			tw32_f(MAC_RX_MODE, tp->rx_mode);
		}
		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
			   MAC_MODE_LINK_POLARITY | MAC_MODE_PORT_MODE_GMII;
		if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		}
		tw32(MAC_MODE, mac_mode);
	}
	else
		return -EINVAL;

	err = -EIO;

	/* Build the test frame: destination MAC, zero filler, then a
	 * counting byte pattern in the payload.
	 */
	tx_len = 1514;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	/* MTU must cover the frame plus the 4-byte FCS. */
	tw32(MAC_RX_MTU_SIZE, tx_len + 4);

	for (i = 14; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);

	/* Force a status block update so rx_producer is current before
	 * we snapshot it.
	 */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	     HOSTCC_MODE_NOW);

	udelay(10);

	rx_start_idx = tp->hw_status->idx[0].rx_producer;

	num_pkts = 0;

	tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);

	tp->tx_prod++;
	num_pkts++;

	/* Ring the TX doorbell; the mailbox read-back flushes the write. */
	tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
		     tp->tx_prod);
	tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);

	udelay(10);

	/* Poll up to 10 times for the frame to be sent and looped back. */
	for (i = 0; i < 10; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       HOSTCC_MODE_NOW);

		udelay(10);

		tx_idx = tp->hw_status->idx[0].tx_consumer;
		rx_idx = tp->hw_status->idx[0].rx_producer;
		if ((tx_idx == tp->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	if (tx_idx != tp->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	/* Locate the RX descriptor for the frame and sanity-check it. */
	desc = &tp->rx_rcb[rx_start_idx];
	desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
	opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
	if (opaque_key != RXD_OPAQUE_RING_STD)
		goto out;

	if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
	    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
		goto out;

	/* Received length includes the 4-byte FCS; strip it. */
	rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
	if (rx_len != tx_len)
		goto out;

	rx_skb = tp->rx_std_buffers[desc_idx].skb;

	map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
	pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);

	/* Verify the payload pattern survived the round trip. */
	for (i = 14; i < tx_len; i++) {
		if (*(rx_skb->data + i) != (u8) (i & 0xff))
			goto out;
	}
	err = 0;

	/* tg3_free_rings will unmap and free the rx_skb */
out:
	return err;
}
8674
8675 #define TG3_MAC_LOOPBACK_FAILED         1
8676 #define TG3_PHY_LOOPBACK_FAILED         2
8677 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
8678                                          TG3_PHY_LOOPBACK_FAILED)
8679
8680 static int tg3_test_loopback(struct tg3 *tp)
8681 {
8682         int err = 0;
8683
8684         if (!netif_running(tp->dev))
8685                 return TG3_LOOPBACK_FAILED;
8686
8687         err = tg3_reset_hw(tp, 1);
8688         if (err)
8689                 return TG3_LOOPBACK_FAILED;
8690
8691         if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
8692                 err |= TG3_MAC_LOOPBACK_FAILED;
8693         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8694                 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
8695                         err |= TG3_PHY_LOOPBACK_FAILED;
8696         }
8697
8698         return err;
8699 }
8700
/* ethtool self_test hook: run the diagnostic suite.
 *
 * data[] slots: 0 = nvram, 1 = link, 2 = registers, 3 = memory,
 * 4 = loopback (bitmask of failed modes), 5 = interrupt; a non-zero
 * slot marks that test as failed and ETH_TEST_FL_FAILED is set.
 * Register/memory/loopback/interrupt tests run offline only and halt
 * and reinitialize the chip around themselves.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);

	/* Wake the chip if it was left in a low-power state. */
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D0);

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	if (tg3_test_link(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		/* Quiesce the chip and on-chip CPUs before poking at
		 * registers and internal memory.
		 */
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}
		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}
		if ((data[4] = tg3_test_loopback(tp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Drop the lock: the interrupt test locks internally. */
		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[5] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Restore normal operation after the offline tests. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
			if (!tg3_restart_hw(tp, 1))
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D3hot);

}
8773
8774 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8775 {
8776         struct mii_ioctl_data *data = if_mii(ifr);
8777         struct tg3 *tp = netdev_priv(dev);
8778         int err;
8779
8780         switch(cmd) {
8781         case SIOCGMIIPHY:
8782                 data->phy_id = PHY_ADDR;
8783
8784                 /* fallthru */
8785         case SIOCGMIIREG: {
8786                 u32 mii_regval;
8787
8788                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8789                         break;                  /* We have no PHY */
8790
8791                 if (tp->link_config.phy_is_low_power)
8792                         return -EAGAIN;
8793
8794                 spin_lock_bh(&tp->lock);
8795                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
8796                 spin_unlock_bh(&tp->lock);
8797
8798                 data->val_out = mii_regval;
8799
8800                 return err;
8801         }
8802
8803         case SIOCSMIIREG:
8804                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8805                         break;                  /* We have no PHY */
8806
8807                 if (!capable(CAP_NET_ADMIN))
8808                         return -EPERM;
8809
8810                 if (tp->link_config.phy_is_low_power)
8811                         return -EAGAIN;
8812
8813                 spin_lock_bh(&tp->lock);
8814                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
8815                 spin_unlock_bh(&tp->lock);
8816
8817                 return err;
8818
8819         default:
8820                 /* do nothing */
8821                 break;
8822         }
8823         return -EOPNOTSUPP;
8824 }
8825
8826 #if TG3_VLAN_TAG_USED
8827 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
8828 {
8829         struct tg3 *tp = netdev_priv(dev);
8830
8831         if (netif_running(dev))
8832                 tg3_netif_stop(tp);
8833
8834         tg3_full_lock(tp, 0);
8835
8836         tp->vlgrp = grp;
8837
8838         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
8839         __tg3_set_rx_mode(dev);
8840
8841         tg3_full_unlock(tp);
8842
8843         if (netif_running(dev))
8844                 tg3_netif_start(tp);
8845 }
8846
8847 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
8848 {
8849         struct tg3 *tp = netdev_priv(dev);
8850
8851         if (netif_running(dev))
8852                 tg3_netif_stop(tp);
8853
8854         tg3_full_lock(tp, 0);
8855         if (tp->vlgrp)
8856                 tp->vlgrp->vlan_devices[vid] = NULL;
8857         tg3_full_unlock(tp);
8858
8859         if (netif_running(dev))
8860                 tg3_netif_start(tp);
8861 }
8862 #endif
8863
8864 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8865 {
8866         struct tg3 *tp = netdev_priv(dev);
8867
8868         memcpy(ec, &tp->coal, sizeof(*ec));
8869         return 0;
8870 }
8871
8872 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8873 {
8874         struct tg3 *tp = netdev_priv(dev);
8875         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
8876         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
8877
8878         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8879                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
8880                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
8881                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
8882                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
8883         }
8884
8885         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
8886             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
8887             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
8888             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
8889             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
8890             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
8891             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
8892             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
8893             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
8894             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
8895                 return -EINVAL;
8896
8897         /* No rx interrupts will be generated if both are zero */
8898         if ((ec->rx_coalesce_usecs == 0) &&
8899             (ec->rx_max_coalesced_frames == 0))
8900                 return -EINVAL;
8901
8902         /* No tx interrupts will be generated if both are zero */
8903         if ((ec->tx_coalesce_usecs == 0) &&
8904             (ec->tx_max_coalesced_frames == 0))
8905                 return -EINVAL;
8906
8907         /* Only copy relevant parameters, ignore all others. */
8908         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
8909         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
8910         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
8911         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
8912         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
8913         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
8914         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
8915         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
8916         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
8917
8918         if (netif_running(dev)) {
8919                 tg3_full_lock(tp, 0);
8920                 __tg3_set_coalesce(tp, &tp->coal);
8921                 tg3_full_unlock(tp);
8922         }
8923         return 0;
8924 }
8925
/* ethtool operations table: wires the standard ethtool entry points to
 * the tg3 implementations defined earlier in this file.  The TSO hooks
 * are compiled in only when the kernel provides NETIF_F_TSO (see the
 * TG3_TSO_SUPPORT definition at the top of the file). */
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.get_rx_csum		= tg3_get_rx_csum,
	.set_rx_csum		= tg3_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= tg3_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
#if TG3_TSO_SUPPORT != 0
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= tg3_set_tso,
#endif
	.self_test_count	= tg3_get_test_count,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.phys_id		= tg3_phys_id,
	.get_stats_count	= tg3_get_stats_count,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};
8965
8966 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
8967 {
8968         u32 cursize, val, magic;
8969
8970         tp->nvram_size = EEPROM_CHIP_SIZE;
8971
8972         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
8973                 return;
8974
8975         if ((magic != TG3_EEPROM_MAGIC) && ((magic & 0xff000000) != 0xa5000000))
8976                 return;
8977
8978         /*
8979          * Size the chip by reading offsets at increasing powers of two.
8980          * When we encounter our validation signature, we know the addressing
8981          * has wrapped around, and thus have our chip size.
8982          */
8983         cursize = 0x10;
8984
8985         while (cursize < tp->nvram_size) {
8986                 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
8987                         return;
8988
8989                 if (val == magic)
8990                         break;
8991
8992                 cursize <<= 1;
8993         }
8994
8995         tp->nvram_size = cursize;
8996 }
8997
8998 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
8999 {
9000         u32 val;
9001
9002         if (tg3_nvram_read_swab(tp, 0, &val) != 0)
9003                 return;
9004
9005         /* Selfboot format */
9006         if (val != TG3_EEPROM_MAGIC) {
9007                 tg3_get_eeprom_size(tp);
9008                 return;
9009         }
9010
9011         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
9012                 if (val != 0) {
9013                         tp->nvram_size = (val >> 16) * 1024;
9014                         return;
9015                 }
9016         }
9017         tp->nvram_size = 0x20000;
9018 }
9019
/* Decode NVRAM_CFG1 to discover the attached NVRAM device for chips
 * that do not have a dedicated decoder (see the dispatch in
 * tg3_nvram_init).  Fills in tp->nvram_jedecnum, tp->nvram_pagesize
 * and the NVRAM_BUFFERED/FLASH flags. */
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
	}
	else {
		/* No flash interface: clear the compatibility bypass so
		 * accesses go through the standard EEPROM path. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	/* Only 5750 and the 5780 class encode the vendor in CFG1;
	 * everything else defaults to buffered Atmel flash below. */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
			case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
				tp->nvram_jedecnum = JEDEC_ATMEL;
				tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
				break;
			case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
				tp->nvram_jedecnum = JEDEC_ATMEL;
				tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
				break;
			case FLASH_VENDOR_ATMEL_EEPROM:
				tp->nvram_jedecnum = JEDEC_ATMEL;
				tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
				break;
			case FLASH_VENDOR_ST:
				tp->nvram_jedecnum = JEDEC_ST;
				tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
				break;
			case FLASH_VENDOR_SAIFUN:
				tp->nvram_jedecnum = JEDEC_SAIFUN;
				tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
				break;
			case FLASH_VENDOR_SST_SMALL:
			case FLASH_VENDOR_SST_LARGE:
				tp->nvram_jedecnum = JEDEC_SST;
				tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
				break;
		}
	}
	else {
		/* Default: buffered Atmel AT45DB0X1B flash. */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
	}
}
9072
/* NVRAM device discovery for 5752-class chips: decode vendor and page
 * size from NVRAM_CFG1 into tp->nvram_jedecnum / tp->nvram_pagesize
 * and the BUFFERED/FLASH/PROTECTED flags. */
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
		case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			break;
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			break;
		case FLASH_5752VENDOR_ST_M45PE10:
		case FLASH_5752VENDOR_ST_M45PE20:
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			break;
	}

	if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
		/* Flash parts report their page size in CFG1. */
		switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
			case FLASH_5752PAGE_SIZE_256:
				tp->nvram_pagesize = 256;
				break;
			case FLASH_5752PAGE_SIZE_512:
				tp->nvram_pagesize = 512;
				break;
			case FLASH_5752PAGE_SIZE_1K:
				tp->nvram_pagesize = 1024;
				break;
			case FLASH_5752PAGE_SIZE_2K:
				tp->nvram_pagesize = 2048;
				break;
			case FLASH_5752PAGE_SIZE_4K:
				tp->nvram_pagesize = 4096;
				break;
			case FLASH_5752PAGE_SIZE_264:
				tp->nvram_pagesize = 264;
				break;
		}
	}
	else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		/* Route accesses through the standard EEPROM path. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
9133
/* NVRAM device discovery for 5755-class chips: decode the vendor field
 * of NVRAM_CFG1 and set the JEDEC id, page size and BUFFERED/FLASH/
 * PROTECTED flags accordingly. */
static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5755VENDOR_ATMEL_EEPROM_64KHZ:
		case FLASH_5755VENDOR_ATMEL_EEPROM_376KHZ:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

			/* EEPROM path: clear the compatibility bypass. */
			nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
			tw32(NVRAM_CFG1, nvcfg1);
			break;
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_5755VENDOR_ATMEL_FLASH_1:
		case FLASH_5755VENDOR_ATMEL_FLASH_2:
		case FLASH_5755VENDOR_ATMEL_FLASH_3:
		case FLASH_5755VENDOR_ATMEL_FLASH_4:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 264;
			break;
		case FLASH_5752VENDOR_ST_M45PE10:
		case FLASH_5752VENDOR_ST_M45PE20:
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 256;
			break;
	}
}
9174
/* NVRAM device discovery for 5787-class chips: same scheme as the
 * 5755 variant but with the 5787 vendor codes (and no TPM protection
 * bit check). */
static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
		case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
		case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
		case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

			/* EEPROM path: clear the compatibility bypass. */
			nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
			tw32(NVRAM_CFG1, nvcfg1);
			break;
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_5755VENDOR_ATMEL_FLASH_1:
		case FLASH_5755VENDOR_ATMEL_FLASH_2:
		case FLASH_5755VENDOR_ATMEL_FLASH_3:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 264;
			break;
		case FLASH_5752VENDOR_ST_M45PE10:
		case FLASH_5752VENDOR_ST_M45PE20:
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 256;
			break;
	}
}
9212
9213 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
9214 static void __devinit tg3_nvram_init(struct tg3 *tp)
9215 {
9216         int j;
9217
9218         tw32_f(GRC_EEPROM_ADDR,
9219              (EEPROM_ADDR_FSM_RESET |
9220               (EEPROM_DEFAULT_CLOCK_PERIOD <<
9221                EEPROM_ADDR_CLKPERD_SHIFT)));
9222
9223         /* XXX schedule_timeout() ... */
9224         for (j = 0; j < 100; j++)
9225                 udelay(10);
9226
9227         /* Enable seeprom accesses. */
9228         tw32_f(GRC_LOCAL_CTRL,
9229              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
9230         udelay(100);
9231
9232         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9233             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
9234                 tp->tg3_flags |= TG3_FLAG_NVRAM;
9235
9236                 if (tg3_nvram_lock(tp)) {
9237                         printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
9238                                "tg3_nvram_init failed.\n", tp->dev->name);
9239                         return;
9240                 }
9241                 tg3_enable_nvram_access(tp);
9242
9243                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9244                         tg3_get_5752_nvram_info(tp);
9245                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9246                         tg3_get_5755_nvram_info(tp);
9247                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
9248                         tg3_get_5787_nvram_info(tp);
9249                 else
9250                         tg3_get_nvram_info(tp);
9251
9252                 tg3_get_nvram_size(tp);
9253
9254                 tg3_disable_nvram_access(tp);
9255                 tg3_nvram_unlock(tp);
9256
9257         } else {
9258                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
9259
9260                 tg3_get_eeprom_size(tp);
9261         }
9262 }
9263
9264 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
9265                                         u32 offset, u32 *val)
9266 {
9267         u32 tmp;
9268         int i;
9269
9270         if (offset > EEPROM_ADDR_ADDR_MASK ||
9271             (offset % 4) != 0)
9272                 return -EINVAL;
9273
9274         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
9275                                         EEPROM_ADDR_DEVID_MASK |
9276                                         EEPROM_ADDR_READ);
9277         tw32(GRC_EEPROM_ADDR,
9278              tmp |
9279              (0 << EEPROM_ADDR_DEVID_SHIFT) |
9280              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
9281               EEPROM_ADDR_ADDR_MASK) |
9282              EEPROM_ADDR_READ | EEPROM_ADDR_START);
9283
9284         for (i = 0; i < 10000; i++) {
9285                 tmp = tr32(GRC_EEPROM_ADDR);
9286
9287                 if (tmp & EEPROM_ADDR_COMPLETE)
9288                         break;
9289                 udelay(100);
9290         }
9291         if (!(tmp & EEPROM_ADDR_COMPLETE))
9292                 return -EBUSY;
9293
9294         *val = tr32(GRC_EEPROM_DATA);
9295         return 0;
9296 }
9297
9298 #define NVRAM_CMD_TIMEOUT 10000
9299
9300 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
9301 {
9302         int i;
9303
9304         tw32(NVRAM_CMD, nvram_cmd);
9305         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
9306                 udelay(10);
9307                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
9308                         udelay(10);
9309                         break;
9310                 }
9311         }
9312         if (i == NVRAM_CMD_TIMEOUT) {
9313                 return -EBUSY;
9314         }
9315         return 0;
9316 }
9317
9318 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
9319 {
9320         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9321             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9322             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9323             (tp->nvram_jedecnum == JEDEC_ATMEL))
9324
9325                 addr = ((addr / tp->nvram_pagesize) <<
9326                         ATMEL_AT45DB0X1B_PAGE_POS) +
9327                        (addr % tp->nvram_pagesize);
9328
9329         return addr;
9330 }
9331
9332 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
9333 {
9334         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9335             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9336             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9337             (tp->nvram_jedecnum == JEDEC_ATMEL))
9338
9339                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
9340                         tp->nvram_pagesize) +
9341                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
9342
9343         return addr;
9344 }
9345
9346 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
9347 {
9348         int ret;
9349
9350         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
9351                 return tg3_nvram_read_using_eeprom(tp, offset, val);
9352
9353         offset = tg3_nvram_phys_addr(tp, offset);
9354
9355         if (offset > NVRAM_ADDR_MSK)
9356                 return -EINVAL;
9357
9358         ret = tg3_nvram_lock(tp);
9359         if (ret)
9360                 return ret;
9361
9362         tg3_enable_nvram_access(tp);
9363
9364         tw32(NVRAM_ADDR, offset);
9365         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
9366                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
9367
9368         if (ret == 0)
9369                 *val = swab32(tr32(NVRAM_RDDATA));
9370
9371         tg3_disable_nvram_access(tp);
9372
9373         tg3_nvram_unlock(tp);
9374
9375         return ret;
9376 }
9377
9378 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
9379 {
9380         int err;
9381         u32 tmp;
9382
9383         err = tg3_nvram_read(tp, offset, &tmp);
9384         *val = swab32(tmp);
9385         return err;
9386 }
9387
/* Write @len bytes from @buf to the legacy serial EEPROM starting at
 * @offset, one 32-bit word at a time through the GRC shuttle
 * registers.  Caller provides dword-aligned offset and length.
 * Returns 0 on success, or -EBUSY if a word write never completes. */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr, data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/* The data register takes the word in little-endian form. */
		tw32(GRC_EEPROM_DATA, cpu_to_le32(data));

		/* Writing COMPLETE back clears any stale status before
		 * the new transaction is started. */
		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		/* Poll for completion, up to 10000 * 100us per word. */
		for (j = 0; j < 10000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			udelay(100);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
9429
/* offset and length are dword aligned.
 *
 * Write path for unbuffered flash parts: the device can only be
 * written a full page at a time, so each affected page is read into a
 * scratch buffer, merged with the caller's data, erased, and written
 * back word by word.  Returns 0 on success or a negative errno; a
 * failure mid-sequence leaves the write-disable command as the final
 * action either way. */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	/* Scratch buffer holding one full page for read-modify-write. */
	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		phy_addr = offset & ~pagemask;

		/* Read the entire page containing this offset. */
		for (j = 0; j < pagesize; j += 4) {
			if ((ret = tg3_nvram_read(tp, phy_addr + j,
						(u32 *) (tmp + j))))
				break;
		}
		if (ret)
			break;

		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		/* Merge the caller's data into the page image. */
		memcpy(tmp + page_off, buf, size);

		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Write the merged page back one dword at a time, flagging
		 * the first and last words of the burst for the controller. */
		for (j = 0; j < pagesize; j += 4) {
			u32 data;

			data = *((u32 *) (tmp + j));
			tw32(NVRAM_WRDATA, cpu_to_be32(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
				break;
		}
		if (ret)
			break;
	}

	/* Always leave the part write-disabled, even on error. */
	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}
9525
/* offset and length are dword aligned.
 *
 * Write path for buffered flash and EEPROM parts: words can be
 * streamed one at a time, marking page boundaries (and, for ST parts
 * on pre-5752 chips, issuing an explicit write-enable at the start of
 * each page).  Returns 0 on success or a negative errno from the
 * first failed command. */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 data, page_off, phy_addr, nvram_cmd;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, cpu_to_be32(data));

		page_off = offset % tp->nvram_pagesize;

		/* Convert to the device's page:column physical address. */
		phy_addr = tg3_nvram_phys_addr(tp, offset);

		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		/* Flag the first word of a page (or of the whole burst)
		 * and the last word of a page or of the burst. */
		if ((page_off == 0) || (i == 0))
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* Older (pre-5752/5755/5787) chips with ST parts need an
		 * explicit write-enable before each page write. */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {

			if ((ret = tg3_nvram_exec_cmd(tp,
				NVRAM_CMD_WREN | NVRAM_CMD_GO |
				NVRAM_CMD_DONE)))

				break;
		}
		if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
			break;
	}
	return ret;
}
9576
9577 /* offset and length are dword aligned */
9578 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
9579 {
9580         int ret;
9581
9582         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9583                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
9584                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
9585                 udelay(40);
9586         }
9587
9588         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
9589                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
9590         }
9591         else {
9592                 u32 grc_mode;
9593
9594                 ret = tg3_nvram_lock(tp);
9595                 if (ret)
9596                         return ret;
9597
9598                 tg3_enable_nvram_access(tp);
9599                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
9600                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
9601                         tw32(NVRAM_WRITE1, 0x406);
9602
9603                 grc_mode = tr32(GRC_MODE);
9604                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
9605
9606                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
9607                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9608
9609                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
9610                                 buf);
9611                 }
9612                 else {
9613                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
9614                                 buf);
9615                 }
9616
9617                 grc_mode = tr32(GRC_MODE);
9618                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
9619
9620                 tg3_disable_nvram_access(tp);
9621                 tg3_nvram_unlock(tp);
9622         }
9623
9624         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9625                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9626                 udelay(40);
9627         }
9628
9629         return ret;
9630 }
9631
/* Maps a board's PCI subsystem vendor/device ID pair to the PHY chip
 * it is known to carry.  A phy_id of 0 is treated by tg3_phy_probe()
 * as a serdes board.
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;	/* PCI subsystem IDs */
	u32 phy_id;				/* PHY_ID_* value, or 0 */
};
9636
/* Hardcoded fallback table used by tg3_phy_probe() when neither the
 * PHY ID registers nor the eeprom yield a usable PHY ID.  Keyed on the
 * PCI subsystem vendor/device pair; see lookup_by_subsys().
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */

	/* 3com boards. */
	{ PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
	{ PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
	{ PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
	{ PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
	{ PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */

	/* DELL boards. */
	{ PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
	{ PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
	{ PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
	{ PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */

	/* Compaq boards. */
	{ PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
	{ PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
	{ PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
	{ PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
	{ PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */

	/* IBM boards. */
	{ PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
};
9674
9675 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
9676 {
9677         int i;
9678
9679         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
9680                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
9681                      tp->pdev->subsystem_vendor) &&
9682                     (subsys_id_to_phy_id[i].subsys_devid ==
9683                      tp->pdev->subsystem_device))
9684                         return &subsys_id_to_phy_id[i];
9685         }
9686         return NULL;
9687 }
9688
/* Read the manufacturing-time hardware configuration that bootcode
 * leaves in NIC SRAM (shadowing the eeprom) and use it to initialize
 * tp->phy_id, tp->led_ctrl and assorted tg3_flags/tg3_flags2 bits.
 * If the SRAM signature is absent, conservative defaults are kept.
 */
static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;
	u16 pmcsr;

	/* On some early chips the SRAM cannot be accessed in D3hot state,
	 * so need make sure we're in D0.
	 */
	pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
	pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
	msleep(1);

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed.  Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	/* Defaults, used when the SRAM signature check below fails. */
	tp->phy_id = PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device by default.  */
	tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		/* The secondary config word (cfg2) is only read on chips
		 * other than 5700/5701/5703 and only for plausible
		 * bootcode data versions; otherwise it stays 0.
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		/* Repack the SRAM PHY ID fields into the driver's
		 * internal PHY-ID layout (same packing as tg3_phy_probe()).
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			/* 5780-class chips use MII-attached serdes. */
			if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
				tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
			else
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}

		/* 5750+ chips carry extended (Shasta) LED mode bits in
		 * cfg2; older chips only have the basic mode in nic_cfg.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			/* 5750 A0/A1 do not get the extra PHY LED bits. */
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		};

		/* Dell 5700/5701 boards force PHY_2 LED mode regardless
		 * of the SRAM-configured mode.
		 */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		/* The SRAM config overrides the default write-protect
		 * assumption made above.
		 */
		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP)
			tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
		else
			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
		if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
			tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;

		if (cfg2 & (1 << 17))
			tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
	}
}
9840
/* Identify the PHY attached to this device, setting tp->phy_id and the
 * serdes/copper flags.  Falls back in order through: the hardware PHY
 * ID registers, the ID recorded by tg3_get_eeprom_hw_cfg(), and the
 * hardcoded subsystem-ID table.  For copper PHYs it also resets the
 * PHY and programs the autoneg advertisement.  Returns 0 or a
 * negative errno.
 */
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
		hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		/* Pack the two MII ID registers into the driver's
		 * internal PHY-ID layout (same packing as the eeprom
		 * path in tg3_get_eeprom_hw_cfg()).
		 */
		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
	}

	if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == PHY_ID_BCM8002)
			tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		else
			tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
	} else {
		if (tp->phy_id != PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			/* A zero table entry denotes a serdes board. */
			if (!tp->phy_id ||
			    tp->phy_id == PHY_ID_BCM8002)
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}
	}

	/* Copper PHY without ASF: reset it and program a full autoneg
	 * advertisement, unless the link is already up.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		u32 bmsr, adv_reg, tg3_ctrl;

		/* BMSR is read twice because the link-status bit is
		 * latched; the second read reflects the current state.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		/* Advertise all 10/100 modes... */
		adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_100HALF | ADVERTISE_100FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		tg3_ctrl = 0;
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
			/* ...plus gigabit, unless this is a 10/100-only
			 * part.  Early 5701 steppings are forced to act
			 * as link master.
			 */
			tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
				    MII_TG3_CTRL_ADV_1000_FULL);
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
					     MII_TG3_CTRL_ENABLE_AS_MASTER);
		}

		/* Only rewrite the registers and restart autoneg if the
		 * PHY is not already advertising everything we want.
		 */
		if (!tg3_copper_is_advertising_all(tp)) {
			tg3_writephy(tp, MII_ADVERTISE, adv_reg);

			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
				tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
		tg3_phy_set_wirespeed(tp);

		/* NOTE(review): the advertisement registers are written
		 * again unconditionally here — presumably to reassert
		 * them after tg3_phy_set_wirespeed(); confirm before
		 * removing.
		 */
		tg3_writephy(tp, MII_ADVERTISE, adv_reg);
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
			tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
	}

skip_phy_reset:
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;
	}

	/* NOTE(review): the 5401 DSP init runs a second time when the
	 * first pass succeeded — looks like a deliberate hardware
	 * workaround; confirm before consolidating the two calls.
	 */
	if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
		err = tg3_init_5401phy_dsp(tp);
	}

	/* Serdes boards advertise gigabit/fibre only... */
	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
		tp->link_config.advertising =
			(ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg |
			 ADVERTISED_FIBRE);
	/* ...and 10/100-only parts must never advertise gigabit. */
	if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
		tp->link_config.advertising &=
			~(ADVERTISED_1000baseT_Half |
			  ADVERTISED_1000baseT_Full);

	return err;
}
9963
9964 static void __devinit tg3_read_partno(struct tg3 *tp)
9965 {
9966         unsigned char vpd_data[256];
9967         int i;
9968         u32 magic;
9969
9970         if (tg3_nvram_read_swab(tp, 0x0, &magic))
9971                 goto out_not_found;
9972
9973         if (magic == TG3_EEPROM_MAGIC) {
9974                 for (i = 0; i < 256; i += 4) {
9975                         u32 tmp;
9976
9977                         if (tg3_nvram_read(tp, 0x100 + i, &tmp))
9978                                 goto out_not_found;
9979
9980                         vpd_data[i + 0] = ((tmp >>  0) & 0xff);
9981                         vpd_data[i + 1] = ((tmp >>  8) & 0xff);
9982                         vpd_data[i + 2] = ((tmp >> 16) & 0xff);
9983                         vpd_data[i + 3] = ((tmp >> 24) & 0xff);
9984                 }
9985         } else {
9986                 int vpd_cap;
9987
9988                 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
9989                 for (i = 0; i < 256; i += 4) {
9990                         u32 tmp, j = 0;
9991                         u16 tmp16;
9992
9993                         pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
9994                                               i);
9995                         while (j++ < 100) {
9996                                 pci_read_config_word(tp->pdev, vpd_cap +
9997                                                      PCI_VPD_ADDR, &tmp16);
9998                                 if (tmp16 & 0x8000)
9999                                         break;
10000                                 msleep(1);
10001                         }
10002                         if (!(tmp16 & 0x8000))
10003                                 goto out_not_found;
10004
10005                         pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
10006                                               &tmp);
10007                         tmp = cpu_to_le32(tmp);
10008                         memcpy(&vpd_data[i], &tmp, 4);
10009                 }
10010         }
10011
10012         /* Now parse and find the part number. */
10013         for (i = 0; i < 256; ) {
10014                 unsigned char val = vpd_data[i];
10015                 int block_end;
10016
10017                 if (val == 0x82 || val == 0x91) {
10018                         i = (i + 3 +
10019                              (vpd_data[i + 1] +
10020                               (vpd_data[i + 2] << 8)));
10021                         continue;
10022                 }
10023
10024                 if (val != 0x90)
10025                         goto out_not_found;
10026
10027                 block_end = (i + 3 +
10028                              (vpd_data[i + 1] +
10029                               (vpd_data[i + 2] << 8)));
10030                 i += 3;
10031                 while (i < block_end) {
10032                         if (vpd_data[i + 0] == 'P' &&
10033                             vpd_data[i + 1] == 'N') {
10034                                 int partno_len = vpd_data[i + 2];
10035
10036                                 if (partno_len > 24)
10037                                         goto out_not_found;
10038
10039                                 memcpy(tp->board_part_number,
10040                                        &vpd_data[i + 3],
10041                                        partno_len);
10042
10043                                 /* Success. */
10044                                 return;
10045                         }
10046                 }
10047
10048                 /* Part number not found. */
10049                 goto out_not_found;
10050         }
10051
10052 out_not_found:
10053         strcpy(tp->board_part_number, "none");
10054 }
10055
/* Extract the bootcode firmware version string from NVRAM into
 * tp->fw_ver.  Silently returns, leaving fw_ver untouched, if the
 * eeprom signature or directory entries do not check out.
 */
static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val, offset, start;

	if (tg3_nvram_read_swab(tp, 0, &val))
		return;

	/* Only images carrying the standard eeprom magic are parsed. */
	if (val != TG3_EEPROM_MAGIC)
		return;

	/* Fetch a directory entry (0xc) and the image start address
	 * (0x4) from the NVRAM header.
	 */
	if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
	    tg3_nvram_read_swab(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);
	if (tg3_nvram_read_swab(tp, offset, &val))
		return;

	/* NOTE(review): the 0x0c pattern in the top 6 bits presumably
	 * identifies a bootcode entry — confirm against the NVRAM
	 * image layout spec.
	 */
	if ((val & 0xfc000000) == 0x0c000000) {
		u32 ver_offset, addr;
		int i;

		if (tg3_nvram_read_swab(tp, offset + 4, &val) ||
		    tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
			return;

		if (val != 0)
			return;

		/* Copy 16 bytes of version string, 4 at a time.
		 * NOTE(review): assumes tp->fw_ver holds at least 16
		 * bytes and that the string is NUL-terminated within
		 * the image — confirm.
		 */
		addr = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			if (tg3_nvram_read(tp, addr + i, &val))
				return;

			val = cpu_to_le32(val);
			memcpy(tp->fw_ver + i, &val, 4);
		}
	}
}
10095
10096 static int __devinit tg3_get_invariants(struct tg3 *tp)
10097 {
10098         static struct pci_device_id write_reorder_chipsets[] = {
10099                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10100                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
10101                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10102                              PCI_DEVICE_ID_AMD_8131_BRIDGE) },
10103                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
10104                              PCI_DEVICE_ID_VIA_8385_0) },
10105                 { },
10106         };
10107         u32 misc_ctrl_reg;
10108         u32 cacheline_sz_reg;
10109         u32 pci_state_reg, grc_misc_cfg;
10110         u32 val;
10111         u16 pci_cmd;
10112         int err;
10113
10114         /* Force memory write invalidate off.  If we leave it on,
10115          * then on 5700_BX chips we have to enable a workaround.
10116          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
10117          * to match the cacheline size.  The Broadcom driver have this
10118          * workaround but turns MWI off all the times so never uses
10119          * it.  This seems to suggest that the workaround is insufficient.
10120          */
10121         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10122         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
10123         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10124
10125         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
10126          * has the register indirect write enable bit set before
10127          * we try to access any of the MMIO registers.  It is also
10128          * critical that the PCI-X hw workaround situation is decided
10129          * before that as well.
10130          */
10131         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10132                               &misc_ctrl_reg);
10133
10134         tp->pci_chip_rev_id = (misc_ctrl_reg >>
10135                                MISC_HOST_CTRL_CHIPREV_SHIFT);
10136
10137         /* Wrong chip ID in 5752 A0. This code can be removed later
10138          * as A0 is not in production.
10139          */
10140         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
10141                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
10142
10143         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
10144          * we need to disable memory and use config. cycles
10145          * only to access all registers. The 5702/03 chips
10146          * can mistakenly decode the special cycles from the
10147          * ICH chipsets as memory write cycles, causing corruption
10148          * of register and memory space. Only certain ICH bridges
10149          * will drive special cycles with non-zero data during the
10150          * address phase which can fall within the 5703's address
10151          * range. This is not an ICH bug as the PCI spec allows
10152          * non-zero address during special cycles. However, only
10153          * these ICH bridges are known to drive non-zero addresses
10154          * during special cycles.
10155          *
10156          * Since special cycles do not cross PCI bridges, we only
10157          * enable this workaround if the 5703 is on the secondary
10158          * bus of these ICH bridges.
10159          */
10160         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
10161             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
10162                 static struct tg3_dev_id {
10163                         u32     vendor;
10164                         u32     device;
10165                         u32     rev;
10166                 } ich_chipsets[] = {
10167                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
10168                           PCI_ANY_ID },
10169                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
10170                           PCI_ANY_ID },
10171                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
10172                           0xa },
10173                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
10174                           PCI_ANY_ID },
10175                         { },
10176                 };
10177                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
10178                 struct pci_dev *bridge = NULL;
10179
10180                 while (pci_id->vendor != 0) {
10181                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
10182                                                 bridge);
10183                         if (!bridge) {
10184                                 pci_id++;
10185                                 continue;
10186                         }
10187                         if (pci_id->rev != PCI_ANY_ID) {
10188                                 u8 rev;
10189
10190                                 pci_read_config_byte(bridge, PCI_REVISION_ID,
10191                                                      &rev);
10192                                 if (rev > pci_id->rev)
10193                                         continue;
10194                         }
10195                         if (bridge->subordinate &&
10196                             (bridge->subordinate->number ==
10197                              tp->pdev->bus->number)) {
10198
10199                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
10200                                 pci_dev_put(bridge);
10201                                 break;
10202                         }
10203                 }
10204         }
10205
10206         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
10207          * DMA addresses > 40-bit. This bridge may have other additional
10208          * 57xx devices behind it in some 4-port NIC designs for example.
10209          * Any tg3 device found behind the bridge will also need the 40-bit
10210          * DMA workaround.
10211          */
10212         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
10213             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10214                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
10215                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10216                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
10217         }
10218         else {
10219                 struct pci_dev *bridge = NULL;
10220
10221                 do {
10222                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
10223                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
10224                                                 bridge);
10225                         if (bridge && bridge->subordinate &&
10226                             (bridge->subordinate->number <=
10227                              tp->pdev->bus->number) &&
10228                             (bridge->subordinate->subordinate >=
10229                              tp->pdev->bus->number)) {
10230                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10231                                 pci_dev_put(bridge);
10232                                 break;
10233                         }
10234                 } while (bridge);
10235         }
10236
10237         /* Initialize misc host control in PCI block. */
10238         tp->misc_host_ctrl |= (misc_ctrl_reg &
10239                                MISC_HOST_CTRL_CHIPREV);
10240         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10241                                tp->misc_host_ctrl);
10242
10243         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10244                               &cacheline_sz_reg);
10245
10246         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
10247         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
10248         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
10249         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
10250
10251         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
10252             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10253             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10254             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10255             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10256                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
10257
10258         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
10259             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
10260                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
10261
10262         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
10263                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10264                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) {
10265                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
10266                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
10267                 } else {
10268                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 |
10269                                           TG3_FLG2_HW_TSO_1_BUG;
10270                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
10271                                 ASIC_REV_5750 &&
10272                             tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
10273                                 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_1_BUG;
10274                 }
10275         }
10276
10277         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
10278             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
10279             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
10280             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
10281             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787)
10282                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
10283
10284         if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
10285                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
10286
10287         /* If we have an AMD 762 or VIA K8T800 chipset, write
10288          * reordering to the mailbox registers done by the host
10289          * controller can cause major troubles.  We read back from
10290          * every mailbox register write to force the writes to be
10291          * posted to the chip in order.
10292          */
10293         if (pci_dev_present(write_reorder_chipsets) &&
10294             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10295                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
10296
10297         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10298             tp->pci_lat_timer < 64) {
10299                 tp->pci_lat_timer = 64;
10300
10301                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
10302                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
10303                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
10304                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
10305
10306                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10307                                        cacheline_sz_reg);
10308         }
10309
10310         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10311                               &pci_state_reg);
10312
10313         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
10314                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
10315
10316                 /* If this is a 5700 BX chipset, and we are in PCI-X
10317                  * mode, enable register write workaround.
10318                  *
10319                  * The workaround is to use indirect register accesses
10320                  * for all chip writes not to mailbox registers.
10321                  */
10322                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
10323                         u32 pm_reg;
10324                         u16 pci_cmd;
10325
10326                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10327
10328                         /* The chip can have it's power management PCI config
10329                          * space registers clobbered due to this bug.
10330                          * So explicitly force the chip into D0 here.
10331                          */
10332                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10333                                               &pm_reg);
10334                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
10335                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
10336                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10337                                                pm_reg);
10338
10339                         /* Also, force SERR#/PERR# in PCI command. */
10340                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10341                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
10342                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10343                 }
10344         }
10345
10346         /* 5700 BX chips need to have their TX producer index mailboxes
10347          * written twice to workaround a bug.
10348          */
10349         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
10350                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
10351
10352         /* Back to back register writes can cause problems on this chip,
10353          * the workaround is to read back all reg writes except those to
10354          * mailbox regs.  See tg3_write_indirect_reg32().
10355          *
10356          * PCI Express 5750_A0 rev chips need this workaround too.
10357          */
10358         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10359             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
10360              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
10361                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
10362
10363         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
10364                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
10365         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
10366                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
10367
10368         /* Chip-specific fixup from Broadcom driver */
10369         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
10370             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
10371                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
10372                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
10373         }
10374
10375         /* Default fast path register access methods */
10376         tp->read32 = tg3_read32;
10377         tp->write32 = tg3_write32;
10378         tp->read32_mbox = tg3_read32;
10379         tp->write32_mbox = tg3_write32;
10380         tp->write32_tx_mbox = tg3_write32;
10381         tp->write32_rx_mbox = tg3_write32;
10382
10383         /* Various workaround register access methods */
10384         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
10385                 tp->write32 = tg3_write_indirect_reg32;
10386         else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
10387                 tp->write32 = tg3_write_flush_reg32;
10388
10389         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
10390             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
10391                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10392                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
10393                         tp->write32_rx_mbox = tg3_write_flush_reg32;
10394         }
10395
10396         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
10397                 tp->read32 = tg3_read_indirect_reg32;
10398                 tp->write32 = tg3_write_indirect_reg32;
10399                 tp->read32_mbox = tg3_read_indirect_mbox;
10400                 tp->write32_mbox = tg3_write_indirect_mbox;
10401                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
10402                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
10403
10404                 iounmap(tp->regs);
10405                 tp->regs = NULL;
10406
10407                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10408                 pci_cmd &= ~PCI_COMMAND_MEMORY;
10409                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10410         }
10411
10412         if (tp->write32 == tg3_write_indirect_reg32 ||
10413             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10414              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10415               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
10416                 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
10417
10418         /* Get eeprom hw config before calling tg3_set_power_state().
10419          * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
10420          * determined before calling tg3_set_power_state() so that
10421          * we know whether or not to switch out of Vaux power.
10422          * When the flag is set, it means that GPIO1 is used for eeprom
10423          * write protect and also implies that it is a LOM where GPIOs
10424          * are not used to switch power.
10425          */
10426         tg3_get_eeprom_hw_cfg(tp);
10427
10428         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
10429          * GPIO1 driven high will bring 5700's external PHY out of reset.
10430          * It is also used as eeprom write protect on LOMs.
10431          */
10432         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
10433         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10434             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
10435                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10436                                        GRC_LCLCTRL_GPIO_OUTPUT1);
10437         /* Unused GPIO3 must be driven as output on 5752 because there
10438          * are no pull-up resistors on unused GPIO pins.
10439          */
10440         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10441                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
10442
10443         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10444                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
10445
10446         /* Force the chip into D0. */
10447         err = tg3_set_power_state(tp, PCI_D0);
10448         if (err) {
10449                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
10450                        pci_name(tp->pdev));
10451                 return err;
10452         }
10453
10454         /* 5700 B0 chips do not support checksumming correctly due
10455          * to hardware bugs.
10456          */
10457         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
10458                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
10459
10460         /* Derive initial jumbo mode from MTU assigned in
10461          * ether_setup() via the alloc_etherdev() call
10462          */
10463         if (tp->dev->mtu > ETH_DATA_LEN &&
10464             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10465                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
10466
10467         /* Determine WakeOnLan speed to use. */
10468         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10469             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
10470             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
10471             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
10472                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
10473         } else {
10474                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
10475         }
10476
10477         /* A few boards don't want Ethernet@WireSpeed phy feature */
10478         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10479             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
10480              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
10481              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
10482             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
10483                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
10484
10485         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
10486             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
10487                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
10488         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
10489                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
10490
10491         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10492                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10493                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
10494                         tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
10495                 else
10496                         tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
10497         }
10498
10499         tp->coalesce_mode = 0;
10500         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
10501             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
10502                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
10503
10504         /* Initialize MAC MI mode, polling disabled. */
10505         tw32_f(MAC_MI_MODE, tp->mi_mode);
10506         udelay(80);
10507
10508         /* Initialize data/descriptor byte/word swapping. */
10509         val = tr32(GRC_MODE);
10510         val &= GRC_MODE_HOST_STACKUP;
10511         tw32(GRC_MODE, val | tp->grc_mode);
10512
10513         tg3_switch_clocks(tp);
10514
10515         /* Clear this out for sanity. */
10516         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10517
10518         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10519                               &pci_state_reg);
10520         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
10521             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
10522                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
10523
10524                 if (chiprevid == CHIPREV_ID_5701_A0 ||
10525                     chiprevid == CHIPREV_ID_5701_B0 ||
10526                     chiprevid == CHIPREV_ID_5701_B2 ||
10527                     chiprevid == CHIPREV_ID_5701_B5) {
10528                         void __iomem *sram_base;
10529
10530                         /* Write some dummy words into the SRAM status block
10531                          * area, see if it reads back correctly.  If the return
10532                          * value is bad, force enable the PCIX workaround.
10533                          */
10534                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
10535
10536                         writel(0x00000000, sram_base);
10537                         writel(0x00000000, sram_base + 4);
10538                         writel(0xffffffff, sram_base + 4);
10539                         if (readl(sram_base) != 0x00000000)
10540                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10541                 }
10542         }
10543
10544         udelay(50);
10545         tg3_nvram_init(tp);
10546
10547         grc_misc_cfg = tr32(GRC_MISC_CFG);
10548         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
10549
10550         /* Broadcom's driver says that CIOBE multisplit has a bug */
10551 #if 0
10552         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
10553             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
10554                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
10555                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
10556         }
10557 #endif
10558         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10559             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
10560              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
10561                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
10562
10563         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
10564             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
10565                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
10566         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
10567                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
10568                                       HOSTCC_MODE_CLRTICK_TXBD);
10569
10570                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
10571                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10572                                        tp->misc_host_ctrl);
10573         }
10574
10575         /* these are limited to 10/100 only */
10576         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10577              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
10578             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10579              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10580              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
10581               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
10582               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
10583             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10584              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
10585               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
10586                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
10587
10588         err = tg3_phy_probe(tp);
10589         if (err) {
10590                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
10591                        pci_name(tp->pdev), err);
10592                 /* ... but do not return immediately ... */
10593         }
10594
10595         tg3_read_partno(tp);
10596         tg3_read_fw_ver(tp);
10597
10598         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
10599                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10600         } else {
10601                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10602                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
10603                 else
10604                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10605         }
10606
10607         /* 5700 {AX,BX} chips have a broken status block link
10608          * change bit implementation, so we must use the
10609          * status register in those cases.
10610          */
10611         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10612                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
10613         else
10614                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
10615
10616         /* The led_ctrl is set during tg3_phy_probe, here we might
10617          * have to force the link status polling mechanism based
10618          * upon subsystem IDs.
10619          */
10620         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10621             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
10622                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
10623                                   TG3_FLAG_USE_LINKCHG_REG);
10624         }
10625
10626         /* For all SERDES we poll the MAC status register. */
10627         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10628                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
10629         else
10630                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
10631
10632         /* All chips before 5787 can get confused if TX buffers
10633          * straddle the 4GB address boundary in some cases.
10634          */
10635         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10636             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
10637                 tp->dev->hard_start_xmit = tg3_start_xmit;
10638         else
10639                 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
10640
10641         tp->rx_offset = 2;
10642         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
10643             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
10644                 tp->rx_offset = 0;
10645
10646         tp->rx_std_max_post = TG3_RX_RING_SIZE;
10647
10648         /* Increment the rx prod index on the rx std ring by at most
10649          * 8 for these chips to workaround hw errata.
10650          */
10651         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
10652             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10653             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10654                 tp->rx_std_max_post = 8;
10655
10656         /* By default, disable wake-on-lan.  User can change this
10657          * using ETHTOOL_SWOL.
10658          */
10659         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
10660
10661         return err;
10662 }
10663
10664 #ifdef CONFIG_SPARC64
10665 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
10666 {
10667         struct net_device *dev = tp->dev;
10668         struct pci_dev *pdev = tp->pdev;
10669         struct pcidev_cookie *pcp = pdev->sysdata;
10670
10671         if (pcp != NULL) {
10672                 unsigned char *addr;
10673                 int len;
10674
10675                 addr = of_get_property(pcp->prom_node, "local-mac-address",
10676                                         &len);
10677                 if (addr && len == 6) {
10678                         memcpy(dev->dev_addr, addr, 6);
10679                         memcpy(dev->perm_addr, dev->dev_addr, 6);
10680                         return 0;
10681                 }
10682         }
10683         return -ENODEV;
10684 }
10685
10686 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
10687 {
10688         struct net_device *dev = tp->dev;
10689
10690         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
10691         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
10692         return 0;
10693 }
10694 #endif
10695
/* Determine the device MAC address and install it in tp->dev.
 *
 * Sources are tried in order: the SPARC firmware property
 * (CONFIG_SPARC64 only), the bootcode MAC-address mailbox in NIC
 * SRAM, NVRAM, the live MAC_ADDR_0 registers, and finally the SPARC
 * IDPROM.  Note each source stores the six bytes in a different
 * order; see the per-branch extraction below.
 *
 * Returns 0 on success, -EINVAL when no valid address was found.
 */
static int __devinit tg3_get_device_address(struct tg3 *tp)
{
        struct net_device *dev = tp->dev;
        u32 hi, lo, mac_offset;
        int addr_ok = 0;

#ifdef CONFIG_SPARC64
        if (!tg3_get_macaddr_sparc(tp))
                return 0;
#endif

        /* Default NVRAM offset of the MAC address.  On dual-MAC parts
         * (5704 and the 5780 class) the second port's address lives at
         * 0xcc instead, selected by DUAL_MAC_CTRL_ID.
         */
        mac_offset = 0x7c;
        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
            (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
                if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
                        mac_offset = 0xcc;
                /* A nonzero tg3_nvram_lock() return presumably means the
                 * lock could not be taken — reset the NVRAM command state
                 * in that case; otherwise release the lock immediately
                 * (the NVRAM reads below take it themselves).
                 * TODO confirm lock return convention against tg3_nvram_lock().
                 */
                if (tg3_nvram_lock(tp))
                        tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
                else
                        tg3_nvram_unlock(tp);
        }

        /* First try to get it from MAC address mailbox. */
        tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
        if ((hi >> 16) == 0x484b) {
                /* 0x484b (ASCII "HK") in the upper half marks a
                 * bootcode-provided address; the low 16 bits hold the
                 * first two address bytes.
                 */
                dev->dev_addr[0] = (hi >>  8) & 0xff;
                dev->dev_addr[1] = (hi >>  0) & 0xff;

                tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
                dev->dev_addr[2] = (lo >> 24) & 0xff;
                dev->dev_addr[3] = (lo >> 16) & 0xff;
                dev->dev_addr[4] = (lo >>  8) & 0xff;
                dev->dev_addr[5] = (lo >>  0) & 0xff;

                /* Some old bootcode may report a 0 MAC address in SRAM */
                addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
        }
        if (!addr_ok) {
                /* Next, try NVRAM.  Byte order differs from the SRAM
                 * mailbox layout above.
                 */
                if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
                    !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
                        dev->dev_addr[0] = ((hi >> 16) & 0xff);
                        dev->dev_addr[1] = ((hi >> 24) & 0xff);
                        dev->dev_addr[2] = ((lo >>  0) & 0xff);
                        dev->dev_addr[3] = ((lo >>  8) & 0xff);
                        dev->dev_addr[4] = ((lo >> 16) & 0xff);
                        dev->dev_addr[5] = ((lo >> 24) & 0xff);
                }
                /* Finally just fetch it out of the MAC control regs. */
                else {
                        hi = tr32(MAC_ADDR_0_HIGH);
                        lo = tr32(MAC_ADDR_0_LOW);

                        dev->dev_addr[5] = lo & 0xff;
                        dev->dev_addr[4] = (lo >> 8) & 0xff;
                        dev->dev_addr[3] = (lo >> 16) & 0xff;
                        dev->dev_addr[2] = (lo >> 24) & 0xff;
                        dev->dev_addr[1] = hi & 0xff;
                        dev->dev_addr[0] = (hi >> 8) & 0xff;
                }
        }

        if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC64
                /* Last resort on SPARC: the system IDPROM address. */
                if (!tg3_get_default_macaddr_sparc(tp))
                        return 0;
#endif
                return -EINVAL;
        }
        memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
        return 0;
}
10768
10769 #define BOUNDARY_SINGLE_CACHELINE       1
10770 #define BOUNDARY_MULTI_CACHELINE        2
10771
10772 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
10773 {
10774         int cacheline_size;
10775         u8 byte;
10776         int goal;
10777
10778         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
10779         if (byte == 0)
10780                 cacheline_size = 1024;
10781         else
10782                 cacheline_size = (int) byte * 4;
10783
10784         /* On 5703 and later chips, the boundary bits have no
10785          * effect.
10786          */
10787         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10788             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
10789             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10790                 goto out;
10791
10792 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
10793         goal = BOUNDARY_MULTI_CACHELINE;
10794 #else
10795 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
10796         goal = BOUNDARY_SINGLE_CACHELINE;
10797 #else
10798         goal = 0;
10799 #endif
10800 #endif
10801
10802         if (!goal)
10803                 goto out;
10804
10805         /* PCI controllers on most RISC systems tend to disconnect
10806          * when a device tries to burst across a cache-line boundary.
10807          * Therefore, letting tg3 do so just wastes PCI bandwidth.
10808          *
10809          * Unfortunately, for PCI-E there are only limited
10810          * write-side controls for this, and thus for reads
10811          * we will still get the disconnects.  We'll also waste
10812          * these PCI cycles for both read and write for chips
10813          * other than 5700 and 5701 which do not implement the
10814          * boundary bits.
10815          */
10816         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10817             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
10818                 switch (cacheline_size) {
10819                 case 16:
10820                 case 32:
10821                 case 64:
10822                 case 128:
10823                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10824                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
10825                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
10826                         } else {
10827                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10828                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10829                         }
10830                         break;
10831
10832                 case 256:
10833                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
10834                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
10835                         break;
10836
10837                 default:
10838                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10839                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10840                         break;
10841                 };
10842         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10843                 switch (cacheline_size) {
10844                 case 16:
10845                 case 32:
10846                 case 64:
10847                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10848                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10849                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
10850                                 break;
10851                         }
10852                         /* fallthrough */
10853                 case 128:
10854                 default:
10855                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10856                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
10857                         break;
10858                 };
10859         } else {
10860                 switch (cacheline_size) {
10861                 case 16:
10862                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10863                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
10864                                         DMA_RWCTRL_WRITE_BNDRY_16);
10865                                 break;
10866                         }
10867                         /* fallthrough */
10868                 case 32:
10869                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10870                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
10871                                         DMA_RWCTRL_WRITE_BNDRY_32);
10872                                 break;
10873                         }
10874                         /* fallthrough */
10875                 case 64:
10876                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10877                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
10878                                         DMA_RWCTRL_WRITE_BNDRY_64);
10879                                 break;
10880                         }
10881                         /* fallthrough */
10882                 case 128:
10883                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10884                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
10885                                         DMA_RWCTRL_WRITE_BNDRY_128);
10886                                 break;
10887                         }
10888                         /* fallthrough */
10889                 case 256:
10890                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
10891                                 DMA_RWCTRL_WRITE_BNDRY_256);
10892                         break;
10893                 case 512:
10894                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
10895                                 DMA_RWCTRL_WRITE_BNDRY_512);
10896                         break;
10897                 case 1024:
10898                 default:
10899                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
10900                                 DMA_RWCTRL_WRITE_BNDRY_1024);
10901                         break;
10902                 };
10903         }
10904
10905 out:
10906         return val;
10907 }
10908
/* Run a single test DMA transaction of @size bytes between host
 * buffer @buf (bus address @buf_dma) and NIC-internal mbuf memory,
 * using a hand-built internal buffer descriptor written into NIC
 * SRAM through the PCI memory window.  @to_device selects the
 * direction: nonzero exercises the read-DMA engine (host -> NIC),
 * zero the write-DMA engine (NIC -> host).
 *
 * Returns 0 when the matching completion FIFO reports the descriptor
 * within the poll window, -ENODEV on timeout.
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
        struct tg3_internal_buffer_desc test_desc;
        u32 sram_dma_descs;
        int i, ret;

        sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

        /* Quiesce: clear completion FIFOs, DMA engine status, the
         * buffer manager, and the FTQs before starting the test.
         */
        tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
        tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
        tw32(RDMAC_STATUS, 0);
        tw32(WDMAC_STATUS, 0);

        tw32(BUFMGR_MODE, 0);
        tw32(FTQ_RESET, 0);

        /* Build the internal buffer descriptor by hand: split the
         * 64-bit host bus address and point at a fixed NIC mbuf.
         */
        test_desc.addr_hi = ((u64) buf_dma) >> 32;
        test_desc.addr_lo = buf_dma & 0xffffffff;
        test_desc.nic_mbuf = 0x00002100;
        test_desc.len = size;

        /*
         * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
         * the *second* time the tg3 driver was getting loaded after an
         * initial scan.
         *
         * Broadcom tells me:
         *   ...the DMA engine is connected to the GRC block and a DMA
         *   reset may affect the GRC block in some unpredictable way...
         *   The behavior of resets to individual blocks has not been tested.
         *
         * Broadcom noted the GRC reset will also reset all sub-components.
         */
        if (to_device) {
                /* Completion/source queue ids for the read-DMA path. */
                test_desc.cqid_sqid = (13 << 8) | 2;

                tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
                udelay(40);
        } else {
                /* Completion/source queue ids for the write-DMA path. */
                test_desc.cqid_sqid = (16 << 8) | 7;

                tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
                udelay(40);
        }
        test_desc.flags = 0x00000005;

        /* Copy the descriptor into SRAM one 32-bit word at a time via
         * the PCI memory window registers, then park the window at 0.
         */
        for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
                u32 val;

                val = *(((u32 *)&test_desc) + i);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
                                       sram_dma_descs + (i * sizeof(u32)));
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
        }
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

        /* Kick off the DMA by enqueueing the descriptor's SRAM address
         * on the appropriate high-priority DMA FTQ.
         */
        if (to_device) {
                tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
        } else {
                tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
        }

        /* Poll the completion FIFO for our descriptor address,
         * up to 40 iterations x 100us = ~4ms total.
         */
        ret = -ENODEV;
        for (i = 0; i < 40; i++) {
                u32 val;

                if (to_device)
                        val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
                else
                        val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
                if ((val & 0xffff) == sram_dma_descs) {
                        ret = 0;
                        break;
                }

                udelay(100);
        }

        return ret;
}
10989
10990 #define TEST_BUFFER_SIZE        0x2000
10991
/* Probe for host/chipset DMA quirks and program TG3PCI_DMA_RW_CTRL
 * accordingly.  After chip-revision-specific watermark/workaround setup,
 * 5700/5701 parts additionally get a real write+read loopback DMA test
 * against an 8KB coherent buffer to detect the write-DMA boundary bug,
 * tightening the write boundary to 16 bytes if corruption is observed.
 * Returns 0 on success or a negative errno.
 */
static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret;

	buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	/* Baseline PCI write/read command watermarks. */
	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	/* Chip/bus-specific watermark and workaround bits. */
	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |= 0x009f0000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	/* 5703/5704 reuse the low nibble for other purposes; clear it. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants.  */
	tg3_switch_clocks(tp);
#endif

	/* Only 5700/5701 need the loopback DMA test below. */
	ret = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	/* Loop: write the pattern to the chip, read it back, verify.
	 * Retried once with a 16-byte write boundary if corruption is
	 * detected; a second failure is fatal.
	 */
	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);

			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				/* First corruption: fall back to 16-byte
				 * write boundary and run the test again.
				 */
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		static struct pci_device_id dma_wait_state_chipsets[] = {
			{ PCI_DEVICE(PCI_VENDOR_ID_APPLE,
				     PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
			{ },
		};

		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		}
		else
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
11172
11173 static void __devinit tg3_init_link_config(struct tg3 *tp)
11174 {
11175         tp->link_config.advertising =
11176                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11177                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11178                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
11179                  ADVERTISED_Autoneg | ADVERTISED_MII);
11180         tp->link_config.speed = SPEED_INVALID;
11181         tp->link_config.duplex = DUPLEX_INVALID;
11182         tp->link_config.autoneg = AUTONEG_ENABLE;
11183         tp->link_config.active_speed = SPEED_INVALID;
11184         tp->link_config.active_duplex = DUPLEX_INVALID;
11185         tp->link_config.phy_is_low_power = 0;
11186         tp->link_config.orig_speed = SPEED_INVALID;
11187         tp->link_config.orig_duplex = DUPLEX_INVALID;
11188         tp->link_config.orig_autoneg = AUTONEG_INVALID;
11189 }
11190
11191 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
11192 {
11193         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11194                 tp->bufmgr_config.mbuf_read_dma_low_water =
11195                         DEFAULT_MB_RDMA_LOW_WATER_5705;
11196                 tp->bufmgr_config.mbuf_mac_rx_low_water =
11197                         DEFAULT_MB_MACRX_LOW_WATER_5705;
11198                 tp->bufmgr_config.mbuf_high_water =
11199                         DEFAULT_MB_HIGH_WATER_5705;
11200
11201                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11202                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
11203                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11204                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
11205                 tp->bufmgr_config.mbuf_high_water_jumbo =
11206                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
11207         } else {
11208                 tp->bufmgr_config.mbuf_read_dma_low_water =
11209                         DEFAULT_MB_RDMA_LOW_WATER;
11210                 tp->bufmgr_config.mbuf_mac_rx_low_water =
11211                         DEFAULT_MB_MACRX_LOW_WATER;
11212                 tp->bufmgr_config.mbuf_high_water =
11213                         DEFAULT_MB_HIGH_WATER;
11214
11215                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11216                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
11217                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11218                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
11219                 tp->bufmgr_config.mbuf_high_water_jumbo =
11220                         DEFAULT_MB_HIGH_WATER_JUMBO;
11221         }
11222
11223         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
11224         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
11225 }
11226
11227 static char * __devinit tg3_phy_string(struct tg3 *tp)
11228 {
11229         switch (tp->phy_id & PHY_ID_MASK) {
11230         case PHY_ID_BCM5400:    return "5400";
11231         case PHY_ID_BCM5401:    return "5401";
11232         case PHY_ID_BCM5411:    return "5411";
11233         case PHY_ID_BCM5701:    return "5701";
11234         case PHY_ID_BCM5703:    return "5703";
11235         case PHY_ID_BCM5704:    return "5704";
11236         case PHY_ID_BCM5705:    return "5705";
11237         case PHY_ID_BCM5750:    return "5750";
11238         case PHY_ID_BCM5752:    return "5752";
11239         case PHY_ID_BCM5714:    return "5714";
11240         case PHY_ID_BCM5780:    return "5780";
11241         case PHY_ID_BCM5755:    return "5755";
11242         case PHY_ID_BCM5787:    return "5787";
11243         case PHY_ID_BCM8002:    return "8002/serdes";
11244         case 0:                 return "serdes";
11245         default:                return "unknown";
11246         };
11247 }
11248
11249 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
11250 {
11251         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11252                 strcpy(str, "PCI Express");
11253                 return str;
11254         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
11255                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
11256
11257                 strcpy(str, "PCIX:");
11258
11259                 if ((clock_ctrl == 7) ||
11260                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
11261                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
11262                         strcat(str, "133MHz");
11263                 else if (clock_ctrl == 0)
11264                         strcat(str, "33MHz");
11265                 else if (clock_ctrl == 2)
11266                         strcat(str, "50MHz");
11267                 else if (clock_ctrl == 4)
11268                         strcat(str, "66MHz");
11269                 else if (clock_ctrl == 6)
11270                         strcat(str, "100MHz");
11271         } else {
11272                 strcpy(str, "PCI:");
11273                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
11274                         strcat(str, "66MHz");
11275                 else
11276                         strcat(str, "33MHz");
11277         }
11278         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
11279                 strcat(str, ":32-bit");
11280         else
11281                 strcat(str, ":64-bit");
11282         return str;
11283 }
11284
/* Locate the other PCI function sharing this device's slot (the second
 * port of a dual-port 5704/5714).  Returns the peer's pci_dev, or
 * tp->pdev itself when the chip is in single-port mode.  The returned
 * pointer is NOT reference-elevated (see comment below).
 */
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	/* Scan all eight functions of our own device number for a
	 * function that isn't us.  On break, peer holds a reference
	 * from pci_get_slot().
	 *
	 * NOTE(review): if the final iteration neither breaks nor
	 * returns NULL from pci_get_slot(), peer is left pointing at an
	 * object whose reference was already dropped; presumably that
	 * only happens when the stale pointer is tp->pdev itself (our
	 * own function at func 7) — confirm against PCI core semantics.
	 */
	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
11312
11313 static void __devinit tg3_init_coal(struct tg3 *tp)
11314 {
11315         struct ethtool_coalesce *ec = &tp->coal;
11316
11317         memset(ec, 0, sizeof(*ec));
11318         ec->cmd = ETHTOOL_GCOALESCE;
11319         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
11320         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
11321         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
11322         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
11323         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
11324         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
11325         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
11326         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
11327         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
11328
11329         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
11330                                  HOSTCC_MODE_CLRTICK_TXBD)) {
11331                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
11332                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
11333                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
11334                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
11335         }
11336
11337         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11338                 ec->rx_coalesce_usecs_irq = 0;
11339                 ec->tx_coalesce_usecs_irq = 0;
11340                 ec->stats_block_coalesce_usecs = 0;
11341         }
11342 }
11343
/* PCI probe entry point.  Enables and maps the device, sizes its DMA
 * masks, reads chip invariants and the MAC address, runs the DMA engine
 * test, and registers the net_device.  Returns 0 on success or a
 * negative errno, unwinding every partially-acquired resource via the
 * err_out_* labels on failure.
 */
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	static int tg3_version_printed = 0;
	unsigned long tg3reg_base, tg3reg_len;
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pm_cap;
	char str[40];
	u64 dma_mask, persist_dma_mask;

	/* Print the driver banner only on the first probe. */
	if (tg3_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot enable PCI device, "
		       "aborting.\n");
		return err;
	}

	/* BAR 0 must be a memory-mapped register window. */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find proper PCI device "
		       "base address, aborting.\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
		       "aborting.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
		       "aborting.\n");
		err = -EIO;
		goto err_out_free_res;
	}

	tg3reg_base = pci_resource_start(pdev, 0);
	tg3reg_len = pci_resource_len(pdev, 0);

	dev = alloc_etherdev(sizeof(*tp));
	if (!dev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

#if TG3_VLAN_TAG_USED
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = tg3_vlan_rx_register;
	dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
#endif

	/* Initialize the private state embedded in the net_device. */
	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->mac_mode = TG3_DEF_MAC_MODE;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;
	tp->mi_mode = MAC_MI_MODE_BASE;
	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task, tp);

	tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
	if (tp->regs == 0UL) {
		printk(KERN_ERR PFX "Cannot map device registers, "
		       "aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	tg3_init_link_config(tp);

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
	tp->tx_pending = TG3_DEF_TX_RING_PENDING;

	/* Wire up the net_device method table. */
	dev->open = tg3_open;
	dev->stop = tg3_close;
	dev->get_stats = tg3_get_stats;
	dev->set_multicast_list = tg3_set_rx_mode;
	dev->set_mac_address = tg3_set_mac_addr;
	dev->do_ioctl = tg3_ioctl;
	dev->tx_timeout = tg3_tx_timeout;
	dev->poll = tg3_poll;
	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->weight = 64;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->change_mtu = tg3_change_mtu;
	dev->irq = pdev->irq;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = tg3_poll_controller;
#endif

	err = tg3_get_invariants(tp);
	if (err) {
		printk(KERN_ERR PFX "Problem fetching invariants of chip, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
		persist_dma_mask = dma_mask = DMA_32BIT_MASK;
	else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
		persist_dma_mask = dma_mask = DMA_40BIT_MASK;
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_64BIT_MASK;
#endif
	} else
		persist_dma_mask = dma_mask = DMA_64BIT_MASK;

	/* Configure DMA attributes. */
	if (dma_mask > DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			dev->features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				printk(KERN_ERR PFX "Unable to obtain 64 bit "
				       "DMA for consistent allocations\n");
				goto err_out_iounmap;
			}
		}
	}
	/* Fall back to 32-bit DMA when the wide mask was refused. */
	if (err || dma_mask == DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			printk(KERN_ERR PFX "No usable DMA configuration, "
			       "aborting.\n");
			goto err_out_iounmap;
		}
	}

	tg3_init_bufmgr_config(tp);

#if TG3_TSO_SUPPORT != 0
	/* Decide whether this chip may do TSO at all (hardware TSO, or
	 * firmware TSO on chips without the known problems).
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	}
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
	} else {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		dev->features |= NETIF_F_TSO;
		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)
			dev->features |= NETIF_F_TSO6;
	}

#endif

	/* Slow-bus 5705 A1 without TSO gets a smaller rx ring. */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
		tp->rx_pending = 63;
	}

	/* Dual-port chips need to know their sibling function. */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
		tp->pdev_peer = tg3_find_peer(tp);

	err = tg3_get_device_address(tp);
	if (err) {
		printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shutdown
	 * DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		pci_save_state(tp->pdev);
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
		goto err_out_iounmap;
	}

	/* Tigon3 can do ipv4 only... and some chips have buggy
	 * checksumming.
	 */
	if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
			dev->features |= NETIF_F_HW_CSUM;
		else
			dev->features |= NETIF_F_IP_CSUM;
		dev->features |= NETIF_F_SG;
		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
	} else
		tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;

	/* flow control autonegotiation is default behavior */
	tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;

	tg3_init_coal(tp);

	/* Now that we have fully setup the chip, save away a snapshot
	 * of the PCI config space.  We need to restore this after
	 * GRC_MISC_CFG core clock resets and some resume events.
	 */
	pci_save_state(tp->pdev);

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR PFX "Cannot register net device, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	pci_set_drvdata(pdev, dev);

	/* Probe banner: part number, revision, PHY, bus, and MAC address. */
	printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
	       dev->name,
	       tp->board_part_number,
	       tp->pci_chip_rev_id,
	       tg3_phy_string(tp),
	       tg3_bus_string(tp, str),
	       (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");

	for (i = 0; i < 6; i++)
		printk("%2.2x%c", dev->dev_addr[i],
		       i == 5 ? '\n' : ':');

	printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
	       "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
	       "TSOcap[%d] \n",
	       dev->name,
	       (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
	       (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
	       (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
	       (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
	       (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
	printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
	       dev->name, tp->dma_rwctrl,
	       (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
	        (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));

	netif_carrier_off(tp->dev);

	return 0;

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
11670
11671 static void __devexit tg3_remove_one(struct pci_dev *pdev)
11672 {
11673         struct net_device *dev = pci_get_drvdata(pdev);
11674
11675         if (dev) {
11676                 struct tg3 *tp = netdev_priv(dev);
11677
11678                 flush_scheduled_work();
11679                 unregister_netdev(dev);
11680                 if (tp->regs) {
11681                         iounmap(tp->regs);
11682                         tp->regs = NULL;
11683                 }
11684                 free_netdev(dev);
11685                 pci_release_regions(pdev);
11686                 pci_disable_device(pdev);
11687                 pci_set_drvdata(pdev, NULL);
11688         }
11689 }
11690
/* PCI power-management suspend hook.
 *
 * Quiesces the interface, halts the chip, and asks it to enter the
 * low-power state chosen by the PCI core.  If the power transition
 * fails, the hardware is restarted and the interface reattached so
 * the device stays usable; the original error is still returned.
 *
 * Returns 0 on success or a negative errno from tg3_set_power_state().
 */
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(dev);
        int err;

        /* Nothing to quiesce if the interface is down. */
        if (!netif_running(dev))
                return 0;

        /* Finish any pending reset task before touching the hardware,
         * then stop the TX/RX paths.
         */
        flush_scheduled_work();
        tg3_netif_stop(tp);

        del_timer_sync(&tp->timer);

        /* NOTE(review): second arg to tg3_full_lock() presumably
         * requests irq synchronization — confirm against its definition.
         */
        tg3_full_lock(tp, 1);
        tg3_disable_ints(tp);
        tg3_full_unlock(tp);

        netif_device_detach(dev);

        /* Halt the chip and mark init as no longer complete so a later
         * resume knows a full restart is required.
         */
        tg3_full_lock(tp, 0);
        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
        tg3_full_unlock(tp);

        err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
        if (err) {
                /* Power transition failed: undo the shutdown above and
                 * bring the interface back to life.  If even the restart
                 * fails we bail out, still reporting the original error.
                 */
                tg3_full_lock(tp, 0);

                tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
                if (tg3_restart_hw(tp, 1))
                        goto out;

                tp->timer.expires = jiffies + tp->timer_offset;
                add_timer(&tp->timer);

                netif_device_attach(dev);
                tg3_netif_start(tp);

out:
                tg3_full_unlock(tp);
        }

        return err;
}
11736
/* PCI power-management resume hook.
 *
 * Mirror of tg3_suspend(): restores PCI config space, moves the chip
 * back to full power (D0), restarts the hardware, and re-arms the
 * driver's periodic timer.
 *
 * Returns 0 on success or a negative errno from tg3_set_power_state()
 * / tg3_restart_hw().
 */
static int tg3_resume(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(dev);
        int err;

        /* Interface was down at suspend time; nothing to restore. */
        if (!netif_running(dev))
                return 0;

        pci_restore_state(tp->pdev);

        err = tg3_set_power_state(tp, PCI_D0);
        if (err)
                return err;

        netif_device_attach(dev);

        tg3_full_lock(tp, 0);

        /* Full hardware restart: suspend cleared INIT_COMPLETE. */
        tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
        err = tg3_restart_hw(tp, 1);
        if (err)
                goto out;

        tp->timer.expires = jiffies + tp->timer_offset;
        add_timer(&tp->timer);

        tg3_netif_start(tp);

out:
        tg3_full_unlock(tp);

        return err;
}
11771
/* PCI driver descriptor: binds the devices listed in tg3_pci_tbl to
 * the probe/remove and power-management entry points above.
 */
static struct pci_driver tg3_driver = {
        .name           = DRV_MODULE_NAME,
        .id_table       = tg3_pci_tbl,
        .probe          = tg3_init_one,
        .remove         = __devexit_p(tg3_remove_one),
        .suspend        = tg3_suspend,
        .resume         = tg3_resume
};
11780
11781 static int __init tg3_init(void)
11782 {
11783         return pci_register_driver(&tg3_driver);
11784 }
11785
/* Module exit point: unregister from the PCI core, which in turn calls
 * tg3_remove_one() for every bound device.
 */
static void __exit tg3_cleanup(void)
{
        pci_unregister_driver(&tg3_driver);
}

/* Register module load/unload handlers. */
module_init(tg3_init);
module_exit(tg3_cleanup);