1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18 #include <linux/config.h>
19
20 #include <linux/module.h>
21 #include <linux/moduleparam.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/ioport.h>
30 #include <linux/pci.h>
31 #include <linux/netdevice.h>
32 #include <linux/etherdevice.h>
33 #include <linux/skbuff.h>
34 #include <linux/ethtool.h>
35 #include <linux/mii.h>
36 #include <linux/if_vlan.h>
37 #include <linux/ip.h>
38 #include <linux/tcp.h>
39 #include <linux/workqueue.h>
40 #include <linux/prefetch.h>
41 #include <linux/dma-mapping.h>
42
43 #include <net/checksum.h>
44
45 #include <asm/system.h>
46 #include <asm/io.h>
47 #include <asm/byteorder.h>
48 #include <asm/uaccess.h>
49
50 #ifdef CONFIG_SPARC64
51 #include <asm/idprom.h>
52 #include <asm/oplib.h>
53 #include <asm/pbm.h>
54 #endif
55
56 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
57 #define TG3_VLAN_TAG_USED 1
58 #else
59 #define TG3_VLAN_TAG_USED 0
60 #endif
61
62 #ifdef NETIF_F_TSO
63 #define TG3_TSO_SUPPORT 1
64 #else
65 #define TG3_TSO_SUPPORT 0
66 #endif
67
68 #include "tg3.h"
69
70 #define DRV_MODULE_NAME         "tg3"
71 #define PFX DRV_MODULE_NAME     ": "
72 #define DRV_MODULE_VERSION      "3.54"
73 #define DRV_MODULE_RELDATE      "Mar 23, 2006"
74
75 #define TG3_DEF_MAC_MODE        0
76 #define TG3_DEF_RX_MODE         0
77 #define TG3_DEF_TX_MODE         0
78 #define TG3_DEF_MSG_ENABLE        \
79         (NETIF_MSG_DRV          | \
80          NETIF_MSG_PROBE        | \
81          NETIF_MSG_LINK         | \
82          NETIF_MSG_TIMER        | \
83          NETIF_MSG_IFDOWN       | \
84          NETIF_MSG_IFUP         | \
85          NETIF_MSG_RX_ERR       | \
86          NETIF_MSG_TX_ERR)
87
88 /* length of time before we decide the hardware is borked,
89  * and dev->tx_timeout() should be called to fix the problem
90  */
91 #define TG3_TX_TIMEOUT                  (5 * HZ)
92
93 /* hardware minimum and maximum for a single frame's data payload */
94 #define TG3_MIN_MTU                     60
95 #define TG3_MAX_MTU(tp) \
96         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
97
98 /* These numbers seem to be hard coded in the NIC firmware somehow.
99  * You can't change the ring sizes, but you can change where you place
100  * them in the NIC onboard memory.
101  */
102 #define TG3_RX_RING_SIZE                512
103 #define TG3_DEF_RX_RING_PENDING         200
104 #define TG3_RX_JUMBO_RING_SIZE          256
105 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
106
107 /* Do not place this n-ring entries value into the tp struct itself,
108  * we really want to expose these constants to GCC so that modulo et
109  * al.  operations are done with shifts and masks instead of with
110  * hw multiply/modulo instructions.  Another solution would be to
111  * replace things like '% foo' with '& (foo - 1)'.
112  */
113 #define TG3_RX_RCB_RING_SIZE(tp)        \
114         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
115
116 #define TG3_TX_RING_SIZE                512
117 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
118
119 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
120                                  TG3_RX_RING_SIZE)
121 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
122                                  TG3_RX_JUMBO_RING_SIZE)
123 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
124                                    TG3_RX_RCB_RING_SIZE(tp))
125 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
126                                  TG3_TX_RING_SIZE)
127 #define TX_BUFFS_AVAIL(TP)                                              \
128         ((TP)->tx_pending -                                             \
129          (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1)))
130 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
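
/* Illustrative note (not in the original driver): because the ring sizes
 * above are powers of two, the '& (TG3_TX_RING_SIZE - 1)' in
 * TX_BUFFS_AVAIL() and NEXT_TX() behaves exactly like '% TG3_TX_RING_SIZE',
 * which is the '% foo' -> '& (foo - 1)' rewrite mentioned in the comment
 * above.  For example, with a 512-entry ring:
 *
 *      NEXT_TX(511) == (511 + 1) & 511 == 0   (wraps back to entry 0)
 *      NEXT_TX(42)  == 43
 */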
131
132 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
133 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
134
135 /* minimum number of free TX descriptors required to wake up TX process */
136 #define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)
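
/* For reference: with TG3_TX_RING_SIZE == 512, this threshold works out to
 * 128 free descriptors before the queue is woken again.
 */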
137
138 /* number of ETHTOOL_GSTATS u64's */
139 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
140
141 #define TG3_NUM_TEST            6
142
143 static char version[] __devinitdata =
144         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
145
146 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
147 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
148 MODULE_LICENSE("GPL");
149 MODULE_VERSION(DRV_MODULE_VERSION);
150
151 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
152 module_param(tg3_debug, int, 0);
153 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
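
/* Usage sketch (illustrative, not from the original source): the debug mask
 * can be supplied at module load time, e.g.
 *
 *      modprobe tg3 tg3_debug=0x1
 *
 * enables only NETIF_MSG_DRV messages; leaving it at -1 keeps the
 * TG3_DEF_MSG_ENABLE default defined above.
 */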
154
155 static struct pci_device_id tg3_pci_tbl[] = {
156         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
157           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
158         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
159           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
160         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
161           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
162         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
163           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
164         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
165           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
166         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
167           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
168         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
169           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
170         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
171           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
172         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
173           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
174         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
175           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
176         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
177           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
178         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
179           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
180         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
181           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
182         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
183           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
184         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
185           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
186         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
187           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
188         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
189           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
190         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
191           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
192         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
193           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
194         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
195           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
196         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
197           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
198         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
199           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
200         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
201           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
202         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
203           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
204         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
205           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
206         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
207           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
208         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
209           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
210         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
211           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
212         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
213           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
214         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
215           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
216         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
217           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
218         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
219           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
220         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
221           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
222         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
223           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
224         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754,
225           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
226         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M,
227           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
228         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755,
229           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
230         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M,
231           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
232         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787,
233           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
234         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M,
235           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
236         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714,
237           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
238         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S,
239           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
240         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715,
241           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
242         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S,
243           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
244         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
245           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
246         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
247           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
248         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
249           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
250         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
251           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
252         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
253           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
254         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
255           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
256         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
257           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
258         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
259           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
260         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
261           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
262         { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
263           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
264         { 0, }
265 };
266
267 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
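
/* Each entry above matches on PCI vendor/device ID only; subvendor and
 * subdevice are wildcarded with PCI_ANY_ID, and the { 0, } entry terminates
 * the table for the PCI core.
 */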
268
269 static struct {
270         const char string[ETH_GSTRING_LEN];
271 } ethtool_stats_keys[TG3_NUM_STATS] = {
272         { "rx_octets" },
273         { "rx_fragments" },
274         { "rx_ucast_packets" },
275         { "rx_mcast_packets" },
276         { "rx_bcast_packets" },
277         { "rx_fcs_errors" },
278         { "rx_align_errors" },
279         { "rx_xon_pause_rcvd" },
280         { "rx_xoff_pause_rcvd" },
281         { "rx_mac_ctrl_rcvd" },
282         { "rx_xoff_entered" },
283         { "rx_frame_too_long_errors" },
284         { "rx_jabbers" },
285         { "rx_undersize_packets" },
286         { "rx_in_length_errors" },
287         { "rx_out_length_errors" },
288         { "rx_64_or_less_octet_packets" },
289         { "rx_65_to_127_octet_packets" },
290         { "rx_128_to_255_octet_packets" },
291         { "rx_256_to_511_octet_packets" },
292         { "rx_512_to_1023_octet_packets" },
293         { "rx_1024_to_1522_octet_packets" },
294         { "rx_1523_to_2047_octet_packets" },
295         { "rx_2048_to_4095_octet_packets" },
296         { "rx_4096_to_8191_octet_packets" },
297         { "rx_8192_to_9022_octet_packets" },
298
299         { "tx_octets" },
300         { "tx_collisions" },
301
302         { "tx_xon_sent" },
303         { "tx_xoff_sent" },
304         { "tx_flow_control" },
305         { "tx_mac_errors" },
306         { "tx_single_collisions" },
307         { "tx_mult_collisions" },
308         { "tx_deferred" },
309         { "tx_excessive_collisions" },
310         { "tx_late_collisions" },
311         { "tx_collide_2times" },
312         { "tx_collide_3times" },
313         { "tx_collide_4times" },
314         { "tx_collide_5times" },
315         { "tx_collide_6times" },
316         { "tx_collide_7times" },
317         { "tx_collide_8times" },
318         { "tx_collide_9times" },
319         { "tx_collide_10times" },
320         { "tx_collide_11times" },
321         { "tx_collide_12times" },
322         { "tx_collide_13times" },
323         { "tx_collide_14times" },
324         { "tx_collide_15times" },
325         { "tx_ucast_packets" },
326         { "tx_mcast_packets" },
327         { "tx_bcast_packets" },
328         { "tx_carrier_sense_errors" },
329         { "tx_discards" },
330         { "tx_errors" },
331
332         { "dma_writeq_full" },
333         { "dma_write_prioq_full" },
334         { "rxbds_empty" },
335         { "rx_discards" },
336         { "rx_errors" },
337         { "rx_threshold_hit" },
338
339         { "dma_readq_full" },
340         { "dma_read_prioq_full" },
341         { "tx_comp_queue_full" },
342
343         { "ring_set_send_prod_index" },
344         { "ring_status_update" },
345         { "nic_irqs" },
346         { "nic_avoided_irqs" },
347         { "nic_tx_threshold_hit" }
348 };
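
/* These strings are what "ethtool -S" reports for the device; their order
 * is assumed to correspond one-to-one with the u64 counters of
 * struct tg3_ethtool_stats (TG3_NUM_STATS of them, see above).
 */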
349
350 static struct {
351         const char string[ETH_GSTRING_LEN];
352 } ethtool_test_keys[TG3_NUM_TEST] = {
353         { "nvram test     (online) " },
354         { "link test      (online) " },
355         { "register test  (offline)" },
356         { "memory test    (offline)" },
357         { "loopback test  (offline)" },
358         { "interrupt test (offline)" },
359 };
360
361 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
362 {
363         writel(val, tp->regs + off);
364 }
365
366 static u32 tg3_read32(struct tg3 *tp, u32 off)
367 {
368         return readl(tp->regs + off);
369 }
370
371 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
372 {
373         unsigned long flags;
374
375         spin_lock_irqsave(&tp->indirect_lock, flags);
376         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
377         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
378         spin_unlock_irqrestore(&tp->indirect_lock, flags);
379 }
380
381 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
382 {
383         writel(val, tp->regs + off);
384         readl(tp->regs + off);
385 }
386
387 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
388 {
389         unsigned long flags;
390         u32 val;
391
392         spin_lock_irqsave(&tp->indirect_lock, flags);
393         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
394         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
395         spin_unlock_irqrestore(&tp->indirect_lock, flags);
396         return val;
397 }
398
399 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
400 {
401         unsigned long flags;
402
403         if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
404                 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
405                                        TG3_64BIT_REG_LOW, val);
406                 return;
407         }
408         if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
409                 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
410                                        TG3_64BIT_REG_LOW, val);
411                 return;
412         }
413
414         spin_lock_irqsave(&tp->indirect_lock, flags);
415         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
416         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
417         spin_unlock_irqrestore(&tp->indirect_lock, flags);
418
419         /* In indirect mode when disabling interrupts, we also need
420          * to clear the interrupt bit in the GRC local ctrl register.
421          */
422         if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
423             (val == 0x1)) {
424                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
425                                        tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
426         }
427 }
428
429 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
430 {
431         unsigned long flags;
432         u32 val;
433
434         spin_lock_irqsave(&tp->indirect_lock, flags);
435         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
436         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
437         spin_unlock_irqrestore(&tp->indirect_lock, flags);
438         return val;
439 }
440
441 /* usec_wait specifies the wait time in usec when writing to certain registers
442  * where it is unsafe to read back the register without some delay.
443  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
444  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
445  */
446 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
447 {
448         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
449             (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
450                 /* Non-posted methods */
451                 tp->write32(tp, off, val);
452         else {
453                 /* Posted method */
454                 tg3_write32(tp, off, val);
455                 if (usec_wait)
456                         udelay(usec_wait);
457                 tp->read32(tp, off);
458         }
459         /* Wait again after the read for the posted method to guarantee that
460          * the wait time is met.
461          */
462         if (usec_wait)
463                 udelay(usec_wait);
464 }
465
466 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
467 {
468         tp->write32_mbox(tp, off, val);
469         if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
470             !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
471                 tp->read32_mbox(tp, off);
472 }
473
474 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
475 {
476         void __iomem *mbox = tp->regs + off;
477         writel(val, mbox);
478         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
479                 writel(val, mbox);
480         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
481                 readl(mbox);
482 }
483
484 #define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
485 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
486 #define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
487 #define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
488 #define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)
489
490 #define tw32(reg,val)           tp->write32(tp, reg, val)
491 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
492 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
493 #define tr32(reg)               tp->read32(tp, reg)
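
/* Illustrative usage of the accessors above (assumption: based on the
 * definitions and the usec_wait comment before _tw32_flush): tw32() is a
 * plain, possibly posted write; tw32_f() flushes the write with a
 * read-back; tw32_wait_f() additionally waits, e.g.
 *
 *      tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
 *
 * as done in tg3_switch_clocks() below.
 */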
494
495 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
496 {
497         unsigned long flags;
498
499         spin_lock_irqsave(&tp->indirect_lock, flags);
500         if (tp->write32 != tg3_write_indirect_reg32) {
501                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
502                 tw32_f(TG3PCI_MEM_WIN_DATA, val);
503
504                 /* Always leave this as zero. */
505                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
506         } else {
507                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
508                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
509
510                 /* Always leave this as zero. */
511                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
512         }
513         spin_unlock_irqrestore(&tp->indirect_lock, flags);
514 }
515
516 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
517 {
518         unsigned long flags;
519
520         spin_lock_irqsave(&tp->indirect_lock, flags);
521         if (tp->write32 != tg3_write_indirect_reg32) {
522                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
523                 *val = tr32(TG3PCI_MEM_WIN_DATA);
524
525                 /* Always leave this as zero. */
526                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
527         } else {
528                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
529                 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
530
531                 /* Always leave this as zero. */
532                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
533         }
534         spin_unlock_irqrestore(&tp->indirect_lock, flags);
535 }
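
/* Illustrative usage (taken from later in this file): NIC SRAM locations
 * are read and written through the memory window helpers above, e.g.
 *
 *      tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
 *
 * with the window base left at zero afterwards.
 */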
536
537 static void tg3_disable_ints(struct tg3 *tp)
538 {
539         tw32(TG3PCI_MISC_HOST_CTRL,
540              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
541         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
542 }
543
544 static inline void tg3_cond_int(struct tg3 *tp)
545 {
546         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
547             (tp->hw_status->status & SD_STATUS_UPDATED))
548                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
549 }
550
551 static void tg3_enable_ints(struct tg3 *tp)
552 {
553         tp->irq_sync = 0;
554         wmb();
555
556         tw32(TG3PCI_MISC_HOST_CTRL,
557              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
558         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
559                        (tp->last_tag << 24));
560         if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
561                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
562                                (tp->last_tag << 24));
563         tg3_cond_int(tp);
564 }
565
566 static inline unsigned int tg3_has_work(struct tg3 *tp)
567 {
568         struct tg3_hw_status *sblk = tp->hw_status;
569         unsigned int work_exists = 0;
570
571         /* check for phy events */
572         if (!(tp->tg3_flags &
573               (TG3_FLAG_USE_LINKCHG_REG |
574                TG3_FLAG_POLL_SERDES))) {
575                 if (sblk->status & SD_STATUS_LINK_CHG)
576                         work_exists = 1;
577         }
578         /* check for RX/TX work to do */
579         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
580             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
581                 work_exists = 1;
582
583         return work_exists;
584 }
585
586 /* tg3_restart_ints
587  *  similar to tg3_enable_ints, but it accurately determines whether there
588  *  is new work pending and can return without flushing the PIO write
589  *  which reenables interrupts 
590  */
591 static void tg3_restart_ints(struct tg3 *tp)
592 {
593         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
594                      tp->last_tag << 24);
595         mmiowb();
596
597         /* When doing tagged status, this work check is unnecessary.
598          * The last_tag we write above tells the chip which piece of
599          * work we've completed.
600          */
601         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
602             tg3_has_work(tp))
603                 tw32(HOSTCC_MODE, tp->coalesce_mode |
604                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
605 }
606
607 static inline void tg3_netif_stop(struct tg3 *tp)
608 {
609         tp->dev->trans_start = jiffies; /* prevent tx timeout */
610         netif_poll_disable(tp->dev);
611         netif_tx_disable(tp->dev);
612 }
613
614 static inline void tg3_netif_start(struct tg3 *tp)
615 {
616         netif_wake_queue(tp->dev);
617         /* NOTE: unconditional netif_wake_queue is only appropriate
618          * so long as all callers are assured to have free tx slots
619          * (such as after tg3_init_hw)
620          */
621         netif_poll_enable(tp->dev);
622         tp->hw_status->status |= SD_STATUS_UPDATED;
623         tg3_enable_ints(tp);
624 }
625
626 static void tg3_switch_clocks(struct tg3 *tp)
627 {
628         u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
629         u32 orig_clock_ctrl;
630
631         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
632                 return;
633
634         orig_clock_ctrl = clock_ctrl;
635         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
636                        CLOCK_CTRL_CLKRUN_OENABLE |
637                        0x1f);
638         tp->pci_clock_ctrl = clock_ctrl;
639
640         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
641                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
642                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
643                                     clock_ctrl | CLOCK_CTRL_625_CORE, 40);
644                 }
645         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
646                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
647                             clock_ctrl |
648                             (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
649                             40);
650                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
651                             clock_ctrl | (CLOCK_CTRL_ALTCLK),
652                             40);
653         }
654         tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
655 }
656
657 #define PHY_BUSY_LOOPS  5000
658
659 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
660 {
661         u32 frame_val;
662         unsigned int loops;
663         int ret;
664
665         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
666                 tw32_f(MAC_MI_MODE,
667                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
668                 udelay(80);
669         }
670
671         *val = 0x0;
672
673         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
674                       MI_COM_PHY_ADDR_MASK);
675         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
676                       MI_COM_REG_ADDR_MASK);
677         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
678         
679         tw32_f(MAC_MI_COM, frame_val);
680
681         loops = PHY_BUSY_LOOPS;
682         while (loops != 0) {
683                 udelay(10);
684                 frame_val = tr32(MAC_MI_COM);
685
686                 if ((frame_val & MI_COM_BUSY) == 0) {
687                         udelay(5);
688                         frame_val = tr32(MAC_MI_COM);
689                         break;
690                 }
691                 loops -= 1;
692         }
693
694         ret = -EBUSY;
695         if (loops != 0) {
696                 *val = frame_val & MI_COM_DATA_MASK;
697                 ret = 0;
698         }
699
700         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
701                 tw32_f(MAC_MI_MODE, tp->mi_mode);
702                 udelay(80);
703         }
704
705         return ret;
706 }
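
/* Illustrative usage (taken from later in this file): PHY registers are
 * read through the MI_COM interface above; tg3_phy_reset() reads the
 * status register twice so that latched bits reflect the current state:
 *
 *      err  = tg3_readphy(tp, MII_BMSR, &phy_status);
 *      err |= tg3_readphy(tp, MII_BMSR, &phy_status);
 */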
707
708 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
709 {
710         u32 frame_val;
711         unsigned int loops;
712         int ret;
713
714         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
715                 tw32_f(MAC_MI_MODE,
716                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
717                 udelay(80);
718         }
719
720         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
721                       MI_COM_PHY_ADDR_MASK);
722         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
723                       MI_COM_REG_ADDR_MASK);
724         frame_val |= (val & MI_COM_DATA_MASK);
725         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
726         
727         tw32_f(MAC_MI_COM, frame_val);
728
729         loops = PHY_BUSY_LOOPS;
730         while (loops != 0) {
731                 udelay(10);
732                 frame_val = tr32(MAC_MI_COM);
733                 if ((frame_val & MI_COM_BUSY) == 0) {
734                         udelay(5);
735                         frame_val = tr32(MAC_MI_COM);
736                         break;
737                 }
738                 loops -= 1;
739         }
740
741         ret = -EBUSY;
742         if (loops != 0)
743                 ret = 0;
744
745         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
746                 tw32_f(MAC_MI_MODE, tp->mi_mode);
747                 udelay(80);
748         }
749
750         return ret;
751 }
752
753 static void tg3_phy_set_wirespeed(struct tg3 *tp)
754 {
755         u32 val;
756
757         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
758                 return;
759
760         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
761             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
762                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
763                              (val | (1 << 15) | (1 << 4)));
764 }
765
766 static int tg3_bmcr_reset(struct tg3 *tp)
767 {
768         u32 phy_control;
769         int limit, err;
770
771         /* OK, reset it, and poll the BMCR_RESET bit until it
772          * clears or we time out.
773          */
774         phy_control = BMCR_RESET;
775         err = tg3_writephy(tp, MII_BMCR, phy_control);
776         if (err != 0)
777                 return -EBUSY;
778
779         limit = 5000;
780         while (limit--) {
781                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
782                 if (err != 0)
783                         return -EBUSY;
784
785                 if ((phy_control & BMCR_RESET) == 0) {
786                         udelay(40);
787                         break;
788                 }
789                 udelay(10);
790         }
791         if (limit <= 0)
792                 return -EBUSY;
793
794         return 0;
795 }
796
797 static int tg3_wait_macro_done(struct tg3 *tp)
798 {
799         int limit = 100;
800
801         while (limit--) {
802                 u32 tmp32;
803
804                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
805                         if ((tmp32 & 0x1000) == 0)
806                                 break;
807                 }
808         }
809         if (limit <= 0)
810                 return -EBUSY;
811
812         return 0;
813 }
814
815 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
816 {
817         static const u32 test_pat[4][6] = {
818         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
819         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
820         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
821         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
822         };
823         int chan;
824
825         for (chan = 0; chan < 4; chan++) {
826                 int i;
827
828                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
829                              (chan * 0x2000) | 0x0200);
830                 tg3_writephy(tp, 0x16, 0x0002);
831
832                 for (i = 0; i < 6; i++)
833                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
834                                      test_pat[chan][i]);
835
836                 tg3_writephy(tp, 0x16, 0x0202);
837                 if (tg3_wait_macro_done(tp)) {
838                         *resetp = 1;
839                         return -EBUSY;
840                 }
841
842                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
843                              (chan * 0x2000) | 0x0200);
844                 tg3_writephy(tp, 0x16, 0x0082);
845                 if (tg3_wait_macro_done(tp)) {
846                         *resetp = 1;
847                         return -EBUSY;
848                 }
849
850                 tg3_writephy(tp, 0x16, 0x0802);
851                 if (tg3_wait_macro_done(tp)) {
852                         *resetp = 1;
853                         return -EBUSY;
854                 }
855
856                 for (i = 0; i < 6; i += 2) {
857                         u32 low, high;
858
859                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
860                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
861                             tg3_wait_macro_done(tp)) {
862                                 *resetp = 1;
863                                 return -EBUSY;
864                         }
865                         low &= 0x7fff;
866                         high &= 0x000f;
867                         if (low != test_pat[chan][i] ||
868                             high != test_pat[chan][i+1]) {
869                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
870                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
871                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
872
873                                 return -EBUSY;
874                         }
875                 }
876         }
877
878         return 0;
879 }
880
881 static int tg3_phy_reset_chanpat(struct tg3 *tp)
882 {
883         int chan;
884
885         for (chan = 0; chan < 4; chan++) {
886                 int i;
887
888                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
889                              (chan * 0x2000) | 0x0200);
890                 tg3_writephy(tp, 0x16, 0x0002);
891                 for (i = 0; i < 6; i++)
892                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
893                 tg3_writephy(tp, 0x16, 0x0202);
894                 if (tg3_wait_macro_done(tp))
895                         return -EBUSY;
896         }
897
898         return 0;
899 }
900
901 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
902 {
903         u32 reg32, phy9_orig;
904         int retries, do_phy_reset, err;
905
906         retries = 10;
907         do_phy_reset = 1;
908         do {
909                 if (do_phy_reset) {
910                         err = tg3_bmcr_reset(tp);
911                         if (err)
912                                 return err;
913                         do_phy_reset = 0;
914                 }
915
916                 /* Disable transmitter and interrupt.  */
917                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
918                         continue;
919
920                 reg32 |= 0x3000;
921                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
922
923                 /* Set full-duplex, 1000 mbps.  */
924                 tg3_writephy(tp, MII_BMCR,
925                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
926
927                 /* Set to master mode.  */
928                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
929                         continue;
930
931                 tg3_writephy(tp, MII_TG3_CTRL,
932                              (MII_TG3_CTRL_AS_MASTER |
933                               MII_TG3_CTRL_ENABLE_AS_MASTER));
934
935                 /* Enable SM_DSP_CLOCK and 6dB.  */
936                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
937
938                 /* Block the PHY control access.  */
939                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
940                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
941
942                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
943                 if (!err)
944                         break;
945         } while (--retries);
946
947         err = tg3_phy_reset_chanpat(tp);
948         if (err)
949                 return err;
950
951         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
952         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
953
954         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
955         tg3_writephy(tp, 0x16, 0x0000);
956
957         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
958             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
959                 /* Set Extended packet length bit for jumbo frames */
960                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
961         }
962         else {
963                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
964         }
965
966         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
967
968         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
969                 reg32 &= ~0x3000;
970                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
971         } else if (!err)
972                 err = -EBUSY;
973
974         return err;
975 }
976
977 /* Reset the tigon3 PHY and apply any chip-specific workarounds
978  * (ADC/BER bugs, 5704 A0 bug, jumbo frame settings) afterwards.
979  */
980 static int tg3_phy_reset(struct tg3 *tp)
981 {
982         u32 phy_status;
983         int err;
984
985         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
986         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
987         if (err != 0)
988                 return -EBUSY;
989
990         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
991             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
992             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
993                 err = tg3_phy_reset_5703_4_5(tp);
994                 if (err)
995                         return err;
996                 goto out;
997         }
998
999         err = tg3_bmcr_reset(tp);
1000         if (err)
1001                 return err;
1002
1003 out:
1004         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
1005                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1006                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1007                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
1008                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1009                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
1010                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1011         }
1012         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
1013                 tg3_writephy(tp, 0x1c, 0x8d68);
1014                 tg3_writephy(tp, 0x1c, 0x8d68);
1015         }
1016         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1017                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1018                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1019                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1020                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1021                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1022                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1023                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1024                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1025         }
1026         /* Set Extended packet length bit (bit 14) on all chips that */
1027         /* support jumbo frames */
1028         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1029                 /* Cannot do read-modify-write on 5401 */
1030                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1031         } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1032                 u32 phy_reg;
1033
1034                 /* Set bit 14 with read-modify-write to preserve other bits */
1035                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1036                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1037                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1038         }
1039
1040         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1041          * jumbo frames transmission.
1042          */
1043         if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1044                 u32 phy_reg;
1045
1046                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1047                     tg3_writephy(tp, MII_TG3_EXT_CTRL,
1048                                  phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1049         }
1050
1051         tg3_phy_set_wirespeed(tp);
1052         return 0;
1053 }
1054
1055 static void tg3_frob_aux_power(struct tg3 *tp)
1056 {
1057         struct tg3 *tp_peer = tp;
1058
1059         if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
1060                 return;
1061
1062         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1063             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1064                 struct net_device *dev_peer;
1065
1066                 dev_peer = pci_get_drvdata(tp->pdev_peer);
1067                 /* remove_one() may have been run on the peer. */
1068                 if (!dev_peer)
1069                         tp_peer = tp;
1070                 else
1071                         tp_peer = netdev_priv(dev_peer);
1072         }
1073
1074         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1075             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1076             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1077             (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
1078                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1079                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1080                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1081                                     (GRC_LCLCTRL_GPIO_OE0 |
1082                                      GRC_LCLCTRL_GPIO_OE1 |
1083                                      GRC_LCLCTRL_GPIO_OE2 |
1084                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
1085                                      GRC_LCLCTRL_GPIO_OUTPUT1),
1086                                     100);
1087                 } else {
1088                         u32 no_gpio2;
1089                         u32 grc_local_ctrl = 0;
1090
1091                         if (tp_peer != tp &&
1092                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1093                                 return;
1094
1095                         /* Workaround to prevent overdrawing Amps. */
1096                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1097                             ASIC_REV_5714) {
1098                                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
1099                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1100                                             grc_local_ctrl, 100);
1101                         }
1102
1103                         /* On 5753 and variants, GPIO2 cannot be used. */
1104                         no_gpio2 = tp->nic_sram_data_cfg &
1105                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
1106
1107                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
1108                                          GRC_LCLCTRL_GPIO_OE1 |
1109                                          GRC_LCLCTRL_GPIO_OE2 |
1110                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
1111                                          GRC_LCLCTRL_GPIO_OUTPUT2;
1112                         if (no_gpio2) {
1113                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1114                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
1115                         }
1116                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1117                                                     grc_local_ctrl, 100);
1118
1119                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1120
1121                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1122                                                     grc_local_ctrl, 100);
1123
1124                         if (!no_gpio2) {
1125                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
1126                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1127                                             grc_local_ctrl, 100);
1128                         }
1129                 }
1130         } else {
1131                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1132                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1133                         if (tp_peer != tp &&
1134                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1135                                 return;
1136
1137                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1138                                     (GRC_LCLCTRL_GPIO_OE1 |
1139                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1140
1141                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1142                                     GRC_LCLCTRL_GPIO_OE1, 100);
1143
1144                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1145                                     (GRC_LCLCTRL_GPIO_OE1 |
1146                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1147                 }
1148         }
1149 }
1150
1151 static int tg3_setup_phy(struct tg3 *, int);
1152
1153 #define RESET_KIND_SHUTDOWN     0
1154 #define RESET_KIND_INIT         1
1155 #define RESET_KIND_SUSPEND      2
1156
1157 static void tg3_write_sig_post_reset(struct tg3 *, int);
1158 static int tg3_halt_cpu(struct tg3 *, u32);
1159 static int tg3_nvram_lock(struct tg3 *);
1160 static void tg3_nvram_unlock(struct tg3 *);
1161
1162 static void tg3_power_down_phy(struct tg3 *tp)
1163 {
1164         /* The PHY should not be powered down on some chips because
1165          * of bugs.
1166          */
1167         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1168             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1169             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
1170              (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1171                 return;
1172         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1173 }
1174
1175 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1176 {
1177         u32 misc_host_ctrl;
1178         u16 power_control, power_caps;
1179         int pm = tp->pm_cap;
1180
1181         /* Make sure register accesses (indirect or otherwise)
1182          * will function correctly.
1183          */
1184         pci_write_config_dword(tp->pdev,
1185                                TG3PCI_MISC_HOST_CTRL,
1186                                tp->misc_host_ctrl);
1187
1188         pci_read_config_word(tp->pdev,
1189                              pm + PCI_PM_CTRL,
1190                              &power_control);
1191         power_control |= PCI_PM_CTRL_PME_STATUS;
1192         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1193         switch (state) {
1194         case PCI_D0:
1195                 power_control |= 0;
1196                 pci_write_config_word(tp->pdev,
1197                                       pm + PCI_PM_CTRL,
1198                                       power_control);
1199                 udelay(100);    /* Delay after power state change */
1200
1201                 /* Switch out of Vaux if it is not a LOM */
1202                 if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
1203                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
1204
1205                 return 0;
1206
1207         case PCI_D1:
1208                 power_control |= 1;
1209                 break;
1210
1211         case PCI_D2:
1212                 power_control |= 2;
1213                 break;
1214
1215         case PCI_D3hot:
1216                 power_control |= 3;
1217                 break;
1218
1219         default:
1220                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1221                        "requested.\n",
1222                        tp->dev->name, state);
1223                 return -EINVAL;
1224         }
1225
1226         power_control |= PCI_PM_CTRL_PME_ENABLE;
1227
1228         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1229         tw32(TG3PCI_MISC_HOST_CTRL,
1230              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1231
1232         if (tp->link_config.phy_is_low_power == 0) {
1233                 tp->link_config.phy_is_low_power = 1;
1234                 tp->link_config.orig_speed = tp->link_config.speed;
1235                 tp->link_config.orig_duplex = tp->link_config.duplex;
1236                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1237         }
1238
1239         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1240                 tp->link_config.speed = SPEED_10;
1241                 tp->link_config.duplex = DUPLEX_HALF;
1242                 tp->link_config.autoneg = AUTONEG_ENABLE;
1243                 tg3_setup_phy(tp, 0);
1244         }
1245
1246         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1247                 int i;
1248                 u32 val;
1249
1250                 for (i = 0; i < 200; i++) {
1251                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1252                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1253                                 break;
1254                         msleep(1);
1255                 }
1256         }
1257         tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1258                                              WOL_DRV_STATE_SHUTDOWN |
1259                                              WOL_DRV_WOL | WOL_SET_MAGIC_PKT);
1260
1261         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1262
1263         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1264                 u32 mac_mode;
1265
1266                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1267                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1268                         udelay(40);
1269
1270                         mac_mode = MAC_MODE_PORT_MODE_MII;
1271
1272                         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1273                             !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1274                                 mac_mode |= MAC_MODE_LINK_POLARITY;
1275                 } else {
1276                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1277                 }
1278
1279                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1280                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1281
1282                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1283                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1284                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1285
1286                 tw32_f(MAC_MODE, mac_mode);
1287                 udelay(100);
1288
1289                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1290                 udelay(10);
1291         }
1292
1293         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1294             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1295              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1296                 u32 base_val;
1297
1298                 base_val = tp->pci_clock_ctrl;
1299                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1300                              CLOCK_CTRL_TXCLK_DISABLE);
1301
1302                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1303                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
1304         } else if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
1305                 /* do nothing */
1306         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1307                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1308                 u32 newbits1, newbits2;
1309
1310                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1311                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1312                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1313                                     CLOCK_CTRL_TXCLK_DISABLE |
1314                                     CLOCK_CTRL_ALTCLK);
1315                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1316                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1317                         newbits1 = CLOCK_CTRL_625_CORE;
1318                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1319                 } else {
1320                         newbits1 = CLOCK_CTRL_ALTCLK;
1321                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1322                 }
1323
1324                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1325                             40);
1326
1327                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1328                             40);
1329
1330                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1331                         u32 newbits3;
1332
1333                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1334                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1335                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1336                                             CLOCK_CTRL_TXCLK_DISABLE |
1337                                             CLOCK_CTRL_44MHZ_CORE);
1338                         } else {
1339                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1340                         }
1341
1342                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
1343                                     tp->pci_clock_ctrl | newbits3, 40);
1344                 }
1345         }
1346
1347         if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
1348             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1349                 /* Turn off the PHY */
1350                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1351                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1352                                      MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1353                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1354                         tg3_power_down_phy(tp);
1355                 }
1356         }
1357
1358         tg3_frob_aux_power(tp);
1359
1360         /* Workaround for unstable PLL clock */
1361         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1362             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1363                 u32 val = tr32(0x7d00);
1364
1365                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1366                 tw32(0x7d00, val);
1367                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1368                         int err;
1369
1370                         err = tg3_nvram_lock(tp);
1371                         tg3_halt_cpu(tp, RX_CPU_BASE);
1372                         if (!err)
1373                                 tg3_nvram_unlock(tp);
1374                 }
1375         }
1376
1377         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1378
1379         /* Finally, set the new power state. */
1380         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1381         udelay(100);    /* Delay after power state change */
1382
1383         return 0;
1384 }
1385
1386 static void tg3_link_report(struct tg3 *tp)
1387 {
1388         if (!netif_carrier_ok(tp->dev)) {
1389                 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1390         } else {
1391                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1392                        tp->dev->name,
1393                        (tp->link_config.active_speed == SPEED_1000 ?
1394                         1000 :
1395                         (tp->link_config.active_speed == SPEED_100 ?
1396                          100 : 10)),
1397                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1398                         "full" : "half"));
1399
1400                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1401                        "%s for RX.\n",
1402                        tp->dev->name,
1403                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1404                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1405         }
1406 }
1407
1408 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1409 {
1410         u32 new_tg3_flags = 0;
1411         u32 old_rx_mode = tp->rx_mode;
1412         u32 old_tx_mode = tp->tx_mode;
1413
1414         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1415
1416                 /* Convert 1000BaseX flow control bits to 1000BaseT
1417                  * bits before resolving flow control.
1418                  */
1419                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1420                         local_adv &= ~(ADVERTISE_PAUSE_CAP |
1421                                        ADVERTISE_PAUSE_ASYM);
1422                         remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1423
1424                         if (local_adv & ADVERTISE_1000XPAUSE)
1425                                 local_adv |= ADVERTISE_PAUSE_CAP;
1426                         if (local_adv & ADVERTISE_1000XPSE_ASYM)
1427                                 local_adv |= ADVERTISE_PAUSE_ASYM;
1428                         if (remote_adv & LPA_1000XPAUSE)
1429                                 remote_adv |= LPA_PAUSE_CAP;
1430                         if (remote_adv & LPA_1000XPAUSE_ASYM)
1431                                 remote_adv |= LPA_PAUSE_ASYM;
1432                 }
1433
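                /* Resolve pause per the IEEE 802.3 Annex 28B priority
                 * rules: symmetric pause when both ends advertise
                 * PAUSE_CAP, otherwise the ASYM bits select a single
                 * direction (or none at all).
                 */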
1434                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1435                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1436                                 if (remote_adv & LPA_PAUSE_CAP)
1437                                         new_tg3_flags |=
1438                                                 (TG3_FLAG_RX_PAUSE |
1439                                                 TG3_FLAG_TX_PAUSE);
1440                                 else if (remote_adv & LPA_PAUSE_ASYM)
1441                                         new_tg3_flags |=
1442                                                 (TG3_FLAG_RX_PAUSE);
1443                         } else {
1444                                 if (remote_adv & LPA_PAUSE_CAP)
1445                                         new_tg3_flags |=
1446                                                 (TG3_FLAG_RX_PAUSE |
1447                                                 TG3_FLAG_TX_PAUSE);
1448                         }
1449                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1450                         if ((remote_adv & LPA_PAUSE_CAP) &&
1451                         (remote_adv & LPA_PAUSE_ASYM))
1452                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1453                 }
1454
1455                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1456                 tp->tg3_flags |= new_tg3_flags;
1457         } else {
1458                 new_tg3_flags = tp->tg3_flags;
1459         }
1460
1461         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1462                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1463         else
1464                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1465
1466         if (old_rx_mode != tp->rx_mode) {
1467                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1468         }
1469         
1470         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1471                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1472         else
1473                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1474
1475         if (old_tx_mode != tp->tx_mode) {
1476                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1477         }
1478 }
1479
1480 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1481 {
1482         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1483         case MII_TG3_AUX_STAT_10HALF:
1484                 *speed = SPEED_10;
1485                 *duplex = DUPLEX_HALF;
1486                 break;
1487
1488         case MII_TG3_AUX_STAT_10FULL:
1489                 *speed = SPEED_10;
1490                 *duplex = DUPLEX_FULL;
1491                 break;
1492
1493         case MII_TG3_AUX_STAT_100HALF:
1494                 *speed = SPEED_100;
1495                 *duplex = DUPLEX_HALF;
1496                 break;
1497
1498         case MII_TG3_AUX_STAT_100FULL:
1499                 *speed = SPEED_100;
1500                 *duplex = DUPLEX_FULL;
1501                 break;
1502
1503         case MII_TG3_AUX_STAT_1000HALF:
1504                 *speed = SPEED_1000;
1505                 *duplex = DUPLEX_HALF;
1506                 break;
1507
1508         case MII_TG3_AUX_STAT_1000FULL:
1509                 *speed = SPEED_1000;
1510                 *duplex = DUPLEX_FULL;
1511                 break;
1512
1513         default:
1514                 *speed = SPEED_INVALID;
1515                 *duplex = DUPLEX_INVALID;
1516                 break;
1517         }
1518 }
1519
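/* Program the copper PHY advertisement registers from link_config and
 * (re)start autonegotiation, or force a fixed speed/duplex when
 * autoneg is disabled.
 */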
1520 static void tg3_phy_copper_begin(struct tg3 *tp)
1521 {
1522         u32 new_adv;
1523         int i;
1524
1525         if (tp->link_config.phy_is_low_power) {
1526                 /* Entering low power mode.  Disable gigabit and
1527                  * 100baseT advertisements.
1528                  */
1529                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1530
1531                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1532                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1533                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1534                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1535
1536                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1537         } else if (tp->link_config.speed == SPEED_INVALID) {
1538                 tp->link_config.advertising =
1539                         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1540                          ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1541                          ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1542                          ADVERTISED_Autoneg | ADVERTISED_MII);
1543
1544                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1545                         tp->link_config.advertising &=
1546                                 ~(ADVERTISED_1000baseT_Half |
1547                                   ADVERTISED_1000baseT_Full);
1548
1549                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1550                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1551                         new_adv |= ADVERTISE_10HALF;
1552                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1553                         new_adv |= ADVERTISE_10FULL;
1554                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1555                         new_adv |= ADVERTISE_100HALF;
1556                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1557                         new_adv |= ADVERTISE_100FULL;
1558                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1559
1560                 if (tp->link_config.advertising &
1561                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1562                         new_adv = 0;
1563                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1564                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1565                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1566                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1567                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1568                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1569                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1570                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1571                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1572                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1573                 } else {
1574                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1575                 }
1576         } else {
1577                 /* Asking for a specific link mode. */
1578                 if (tp->link_config.speed == SPEED_1000) {
1579                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1580                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1581
1582                         if (tp->link_config.duplex == DUPLEX_FULL)
1583                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1584                         else
1585                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1586                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1587                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1588                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1589                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1590                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1591                 } else {
1592                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1593
1594                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1595                         if (tp->link_config.speed == SPEED_100) {
1596                                 if (tp->link_config.duplex == DUPLEX_FULL)
1597                                         new_adv |= ADVERTISE_100FULL;
1598                                 else
1599                                         new_adv |= ADVERTISE_100HALF;
1600                         } else {
1601                                 if (tp->link_config.duplex == DUPLEX_FULL)
1602                                         new_adv |= ADVERTISE_10FULL;
1603                                 else
1604                                         new_adv |= ADVERTISE_10HALF;
1605                         }
1606                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1607                 }
1608         }
1609
1610         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1611             tp->link_config.speed != SPEED_INVALID) {
1612                 u32 bmcr, orig_bmcr;
1613
1614                 tp->link_config.active_speed = tp->link_config.speed;
1615                 tp->link_config.active_duplex = tp->link_config.duplex;
1616
1617                 bmcr = 0;
1618                 switch (tp->link_config.speed) {
1619                 default:
1620                 case SPEED_10:
1621                         break;
1622
1623                 case SPEED_100:
1624                         bmcr |= BMCR_SPEED100;
1625                         break;
1626
1627                 case SPEED_1000:
1628                         bmcr |= TG3_BMCR_SPEED1000;
1629                         break;
1630                 }
1631
1632                 if (tp->link_config.duplex == DUPLEX_FULL)
1633                         bmcr |= BMCR_FULLDPLX;
1634
1635                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1636                     (bmcr != orig_bmcr)) {
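                        /* Drop the link by putting the PHY in loopback,
                         * wait for the link to go down, then apply the
                         * new BMCR setting.
                         */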
1637                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1638                         for (i = 0; i < 1500; i++) {
1639                                 u32 tmp;
1640
1641                                 udelay(10);
1642                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1643                                     tg3_readphy(tp, MII_BMSR, &tmp))
1644                                         continue;
1645                                 if (!(tmp & BMSR_LSTATUS)) {
1646                                         udelay(40);
1647                                         break;
1648                                 }
1649                         }
1650                         tg3_writephy(tp, MII_BMCR, bmcr);
1651                         udelay(40);
1652                 }
1653         } else {
1654                 tg3_writephy(tp, MII_BMCR,
1655                              BMCR_ANENABLE | BMCR_ANRESTART);
1656         }
1657 }
1658
1659 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1660 {
1661         int err;
1662
1663         /* Turn off tap power management. */
1664         /* Set Extended packet length bit */
1665         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1666
1667         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1668         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1669
1670         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1671         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1672
1673         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1674         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1675
1676         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1677         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1678
1679         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1680         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1681
1682         udelay(40);
1683
1684         return err;
1685 }
1686
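/* Return 1 only if the PHY is currently advertising the full set of
 * 10/100 (and, unless the chip is 10/100-only, 1000BaseT) modes.  The
 * caller uses this to force an autoneg restart when a restricted
 * advertisement (e.g. one left over from low-power mode) is in effect.
 */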
1687 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1688 {
1689         u32 adv_reg, all_mask;
1690
1691         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1692                 return 0;
1693
1694         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1695                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1696         if ((adv_reg & all_mask) != all_mask)
1697                 return 0;
1698         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1699                 u32 tg3_ctrl;
1700
1701                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1702                         return 0;
1703
1704                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1705                             MII_TG3_CTRL_ADV_1000_FULL);
1706                 if ((tg3_ctrl & all_mask) != all_mask)
1707                         return 0;
1708         }
1709         return 1;
1710 }
1711
1712 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1713 {
1714         int current_link_up;
1715         u32 bmsr, dummy;
1716         u16 current_speed;
1717         u8 current_duplex;
1718         int i, err;
1719
1720         tw32(MAC_EVENT, 0);
1721
1722         tw32_f(MAC_STATUS,
1723              (MAC_STATUS_SYNC_CHANGED |
1724               MAC_STATUS_CFG_CHANGED |
1725               MAC_STATUS_MI_COMPLETION |
1726               MAC_STATUS_LNKSTATE_CHANGED));
1727         udelay(40);
1728
1729         tp->mi_mode = MAC_MI_MODE_BASE;
1730         tw32_f(MAC_MI_MODE, tp->mi_mode);
1731         udelay(80);
1732
1733         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1734
1735         /* Some third-party PHYs need to be reset on link going
1736          * down.
1737          */
1738         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1739              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1740              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1741             netif_carrier_ok(tp->dev)) {
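                /* The BMSR link status bit is latched low, so read the
                 * register twice to get the current link state.
                 */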
1742                 tg3_readphy(tp, MII_BMSR, &bmsr);
1743                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1744                     !(bmsr & BMSR_LSTATUS))
1745                         force_reset = 1;
1746         }
1747         if (force_reset)
1748                 tg3_phy_reset(tp);
1749
1750         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1751                 tg3_readphy(tp, MII_BMSR, &bmsr);
1752                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1753                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1754                         bmsr = 0;
1755
1756                 if (!(bmsr & BMSR_LSTATUS)) {
1757                         err = tg3_init_5401phy_dsp(tp);
1758                         if (err)
1759                                 return err;
1760
1761                         tg3_readphy(tp, MII_BMSR, &bmsr);
1762                         for (i = 0; i < 1000; i++) {
1763                                 udelay(10);
1764                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1765                                     (bmsr & BMSR_LSTATUS)) {
1766                                         udelay(40);
1767                                         break;
1768                                 }
1769                         }
1770
1771                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1772                             !(bmsr & BMSR_LSTATUS) &&
1773                             tp->link_config.active_speed == SPEED_1000) {
1774                                 err = tg3_phy_reset(tp);
1775                                 if (!err)
1776                                         err = tg3_init_5401phy_dsp(tp);
1777                                 if (err)
1778                                         return err;
1779                         }
1780                 }
1781         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1782                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1783                 /* 5701 {A0,B0} CRC bug workaround */
1784                 tg3_writephy(tp, 0x15, 0x0a75);
1785                 tg3_writephy(tp, 0x1c, 0x8c68);
1786                 tg3_writephy(tp, 0x1c, 0x8d68);
1787                 tg3_writephy(tp, 0x1c, 0x8c68);
1788         }
1789
1790         /* Clear pending interrupts... */
1791         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1792         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1793
1794         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1795                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1796         else
1797                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1798
1799         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1800             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1801                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1802                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1803                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1804                 else
1805                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1806         }
1807
1808         current_link_up = 0;
1809         current_speed = SPEED_INVALID;
1810         current_duplex = DUPLEX_INVALID;
1811
1812         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1813                 u32 val;
1814
1815                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1816                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1817                 if (!(val & (1 << 10))) {
1818                         val |= (1 << 10);
1819                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1820                         goto relink;
1821                 }
1822         }
1823
1824         bmsr = 0;
1825         for (i = 0; i < 100; i++) {
1826                 tg3_readphy(tp, MII_BMSR, &bmsr);
1827                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1828                     (bmsr & BMSR_LSTATUS))
1829                         break;
1830                 udelay(40);
1831         }
1832
1833         if (bmsr & BMSR_LSTATUS) {
1834                 u32 aux_stat, bmcr;
1835
1836                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1837                 for (i = 0; i < 2000; i++) {
1838                         udelay(10);
1839                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1840                             aux_stat)
1841                                 break;
1842                 }
1843
1844                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1845                                              &current_speed,
1846                                              &current_duplex);
1847
1848                 bmcr = 0;
1849                 for (i = 0; i < 200; i++) {
1850                         tg3_readphy(tp, MII_BMCR, &bmcr);
1851                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
1852                                 continue;
1853                         if (bmcr && bmcr != 0x7fff)
1854                                 break;
1855                         udelay(10);
1856                 }
1857
1858                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1859                         if (bmcr & BMCR_ANENABLE) {
1860                                 current_link_up = 1;
1861
1862                                 /* Force autoneg restart if we are exiting
1863                                  * low power mode.
1864                                  */
1865                                 if (!tg3_copper_is_advertising_all(tp))
1866                                         current_link_up = 0;
1867                         } else {
1868                                 current_link_up = 0;
1869                         }
1870                 } else {
1871                         if (!(bmcr & BMCR_ANENABLE) &&
1872                             tp->link_config.speed == current_speed &&
1873                             tp->link_config.duplex == current_duplex) {
1874                                 current_link_up = 1;
1875                         } else {
1876                                 current_link_up = 0;
1877                         }
1878                 }
1879
1880                 tp->link_config.active_speed = current_speed;
1881                 tp->link_config.active_duplex = current_duplex;
1882         }
1883
1884         if (current_link_up == 1 &&
1885             (tp->link_config.active_duplex == DUPLEX_FULL) &&
1886             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1887                 u32 local_adv, remote_adv;
1888
1889                 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1890                         local_adv = 0;
1891                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1892
1893                 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1894                         remote_adv = 0;
1895
1896                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1897
1898                 /* If we are not advertising full pause capability,
1899                  * something is wrong.  Bring the link down and reconfigure.
1900                  */
1901                 if (local_adv != ADVERTISE_PAUSE_CAP) {
1902                         current_link_up = 0;
1903                 } else {
1904                         tg3_setup_flow_control(tp, local_adv, remote_adv);
1905                 }
1906         }
1907 relink:
1908         if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
1909                 u32 tmp;
1910
1911                 tg3_phy_copper_begin(tp);
1912
1913                 tg3_readphy(tp, MII_BMSR, &tmp);
1914                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1915                     (tmp & BMSR_LSTATUS))
1916                         current_link_up = 1;
1917         }
1918
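        /* Select the MAC port mode: MII for 10/100 links, GMII for
         * gigabit and for when the link is down.
         */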
1919         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1920         if (current_link_up == 1) {
1921                 if (tp->link_config.active_speed == SPEED_100 ||
1922                     tp->link_config.active_speed == SPEED_10)
1923                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1924                 else
1925                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1926         } else
1927                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1928
1929         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1930         if (tp->link_config.active_duplex == DUPLEX_HALF)
1931                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1932
1933         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1934         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1935                 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1936                     (current_link_up == 1 &&
1937                      tp->link_config.active_speed == SPEED_10))
1938                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1939         } else {
1940                 if (current_link_up == 1)
1941                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1942         }
1943
1944         /* ??? Without this setting Netgear GA302T PHY does not
1945          * ??? send/receive packets...
1946          */
1947         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1948             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1949                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1950                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1951                 udelay(80);
1952         }
1953
1954         tw32_f(MAC_MODE, tp->mac_mode);
1955         udelay(40);
1956
1957         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1958                 /* Polled via timer. */
1959                 tw32_f(MAC_EVENT, 0);
1960         } else {
1961                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1962         }
1963         udelay(40);
1964
1965         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1966             current_link_up == 1 &&
1967             tp->link_config.active_speed == SPEED_1000 &&
1968             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1969              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1970                 udelay(120);
1971                 tw32_f(MAC_STATUS,
1972                      (MAC_STATUS_SYNC_CHANGED |
1973                       MAC_STATUS_CFG_CHANGED));
1974                 udelay(40);
1975                 tg3_write_mem(tp,
1976                               NIC_SRAM_FIRMWARE_MBOX,
1977                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1978         }
1979
1980         if (current_link_up != netif_carrier_ok(tp->dev)) {
1981                 if (current_link_up)
1982                         netif_carrier_on(tp->dev);
1983                 else
1984                         netif_carrier_off(tp->dev);
1985                 tg3_link_report(tp);
1986         }
1987
1988         return 0;
1989 }
1990
1991 struct tg3_fiber_aneginfo {
1992         int state;
1993 #define ANEG_STATE_UNKNOWN              0
1994 #define ANEG_STATE_AN_ENABLE            1
1995 #define ANEG_STATE_RESTART_INIT         2
1996 #define ANEG_STATE_RESTART              3
1997 #define ANEG_STATE_DISABLE_LINK_OK      4
1998 #define ANEG_STATE_ABILITY_DETECT_INIT  5
1999 #define ANEG_STATE_ABILITY_DETECT       6
2000 #define ANEG_STATE_ACK_DETECT_INIT      7
2001 #define ANEG_STATE_ACK_DETECT           8
2002 #define ANEG_STATE_COMPLETE_ACK_INIT    9
2003 #define ANEG_STATE_COMPLETE_ACK         10
2004 #define ANEG_STATE_IDLE_DETECT_INIT     11
2005 #define ANEG_STATE_IDLE_DETECT          12
2006 #define ANEG_STATE_LINK_OK              13
2007 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
2008 #define ANEG_STATE_NEXT_PAGE_WAIT       15
2009
2010         u32 flags;
2011 #define MR_AN_ENABLE            0x00000001
2012 #define MR_RESTART_AN           0x00000002
2013 #define MR_AN_COMPLETE          0x00000004
2014 #define MR_PAGE_RX              0x00000008
2015 #define MR_NP_LOADED            0x00000010
2016 #define MR_TOGGLE_TX            0x00000020
2017 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
2018 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
2019 #define MR_LP_ADV_SYM_PAUSE     0x00000100
2020 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
2021 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
2022 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2023 #define MR_LP_ADV_NEXT_PAGE     0x00001000
2024 #define MR_TOGGLE_RX            0x00002000
2025 #define MR_NP_RX                0x00004000
2026
2027 #define MR_LINK_OK              0x80000000
2028
2029         unsigned long link_time, cur_time;
2030
2031         u32 ability_match_cfg;
2032         int ability_match_count;
2033
2034         char ability_match, idle_match, ack_match;
2035
2036         u32 txconfig, rxconfig;
2037 #define ANEG_CFG_NP             0x00000080
2038 #define ANEG_CFG_ACK            0x00000040
2039 #define ANEG_CFG_RF2            0x00000020
2040 #define ANEG_CFG_RF1            0x00000010
2041 #define ANEG_CFG_PS2            0x00000001
2042 #define ANEG_CFG_PS1            0x00008000
2043 #define ANEG_CFG_HD             0x00004000
2044 #define ANEG_CFG_FD             0x00002000
2045 #define ANEG_CFG_INVAL          0x00001f06
2046
2047 };
2048 #define ANEG_OK         0
2049 #define ANEG_DONE       1
2050 #define ANEG_TIMER_ENAB 2
2051 #define ANEG_FAILED     -1
2052
2053 #define ANEG_STATE_SETTLE_TIME  10000
2054
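/* Software implementation of the IEEE 802.3 Clause 37 (1000BASE-X)
 * auto-negotiation arbitration state machine.  fiber_autoneg() below
 * calls this repeatedly, advancing cur_time by one tick per call.
 */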
2055 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2056                                    struct tg3_fiber_aneginfo *ap)
2057 {
2058         unsigned long delta;
2059         u32 rx_cfg_reg;
2060         int ret;
2061
2062         if (ap->state == ANEG_STATE_UNKNOWN) {
2063                 ap->rxconfig = 0;
2064                 ap->link_time = 0;
2065                 ap->cur_time = 0;
2066                 ap->ability_match_cfg = 0;
2067                 ap->ability_match_count = 0;
2068                 ap->ability_match = 0;
2069                 ap->idle_match = 0;
2070                 ap->ack_match = 0;
2071         }
2072         ap->cur_time++;
2073
2074         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2075                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2076
2077                 if (rx_cfg_reg != ap->ability_match_cfg) {
2078                         ap->ability_match_cfg = rx_cfg_reg;
2079                         ap->ability_match = 0;
2080                         ap->ability_match_count = 0;
2081                 } else {
2082                         if (++ap->ability_match_count > 1) {
2083                                 ap->ability_match = 1;
2084                                 ap->ability_match_cfg = rx_cfg_reg;
2085                         }
2086                 }
2087                 if (rx_cfg_reg & ANEG_CFG_ACK)
2088                         ap->ack_match = 1;
2089                 else
2090                         ap->ack_match = 0;
2091
2092                 ap->idle_match = 0;
2093         } else {
2094                 ap->idle_match = 1;
2095                 ap->ability_match_cfg = 0;
2096                 ap->ability_match_count = 0;
2097                 ap->ability_match = 0;
2098                 ap->ack_match = 0;
2099
2100                 rx_cfg_reg = 0;
2101         }
2102
2103         ap->rxconfig = rx_cfg_reg;
2104         ret = ANEG_OK;
2105
2106         switch (ap->state) {
2107         case ANEG_STATE_UNKNOWN:
2108                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2109                         ap->state = ANEG_STATE_AN_ENABLE;
2110
2111                 /* fallthru */
2112         case ANEG_STATE_AN_ENABLE:
2113                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2114                 if (ap->flags & MR_AN_ENABLE) {
2115                         ap->link_time = 0;
2116                         ap->cur_time = 0;
2117                         ap->ability_match_cfg = 0;
2118                         ap->ability_match_count = 0;
2119                         ap->ability_match = 0;
2120                         ap->idle_match = 0;
2121                         ap->ack_match = 0;
2122
2123                         ap->state = ANEG_STATE_RESTART_INIT;
2124                 } else {
2125                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
2126                 }
2127                 break;
2128
2129         case ANEG_STATE_RESTART_INIT:
2130                 ap->link_time = ap->cur_time;
2131                 ap->flags &= ~(MR_NP_LOADED);
2132                 ap->txconfig = 0;
2133                 tw32(MAC_TX_AUTO_NEG, 0);
2134                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2135                 tw32_f(MAC_MODE, tp->mac_mode);
2136                 udelay(40);
2137
2138                 ret = ANEG_TIMER_ENAB;
2139                 ap->state = ANEG_STATE_RESTART;
2140
2141                 /* fallthru */
2142         case ANEG_STATE_RESTART:
2143                 delta = ap->cur_time - ap->link_time;
2144                 if (delta > ANEG_STATE_SETTLE_TIME) {
2145                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2146                 } else {
2147                         ret = ANEG_TIMER_ENAB;
2148                 }
2149                 break;
2150
2151         case ANEG_STATE_DISABLE_LINK_OK:
2152                 ret = ANEG_DONE;
2153                 break;
2154
2155         case ANEG_STATE_ABILITY_DETECT_INIT:
2156                 ap->flags &= ~(MR_TOGGLE_TX);
2157                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2158                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2159                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2160                 tw32_f(MAC_MODE, tp->mac_mode);
2161                 udelay(40);
2162
2163                 ap->state = ANEG_STATE_ABILITY_DETECT;
2164                 break;
2165
2166         case ANEG_STATE_ABILITY_DETECT:
2167                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2168                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2169                 }
2170                 break;
2171
2172         case ANEG_STATE_ACK_DETECT_INIT:
2173                 ap->txconfig |= ANEG_CFG_ACK;
2174                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2175                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2176                 tw32_f(MAC_MODE, tp->mac_mode);
2177                 udelay(40);
2178
2179                 ap->state = ANEG_STATE_ACK_DETECT;
2180
2181                 /* fallthru */
2182         case ANEG_STATE_ACK_DETECT:
2183                 if (ap->ack_match != 0) {
2184                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2185                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2186                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2187                         } else {
2188                                 ap->state = ANEG_STATE_AN_ENABLE;
2189                         }
2190                 } else if (ap->ability_match != 0 &&
2191                            ap->rxconfig == 0) {
2192                         ap->state = ANEG_STATE_AN_ENABLE;
2193                 }
2194                 break;
2195
2196         case ANEG_STATE_COMPLETE_ACK_INIT:
2197                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2198                         ret = ANEG_FAILED;
2199                         break;
2200                 }
2201                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2202                                MR_LP_ADV_HALF_DUPLEX |
2203                                MR_LP_ADV_SYM_PAUSE |
2204                                MR_LP_ADV_ASYM_PAUSE |
2205                                MR_LP_ADV_REMOTE_FAULT1 |
2206                                MR_LP_ADV_REMOTE_FAULT2 |
2207                                MR_LP_ADV_NEXT_PAGE |
2208                                MR_TOGGLE_RX |
2209                                MR_NP_RX);
2210                 if (ap->rxconfig & ANEG_CFG_FD)
2211                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2212                 if (ap->rxconfig & ANEG_CFG_HD)
2213                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2214                 if (ap->rxconfig & ANEG_CFG_PS1)
2215                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2216                 if (ap->rxconfig & ANEG_CFG_PS2)
2217                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2218                 if (ap->rxconfig & ANEG_CFG_RF1)
2219                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2220                 if (ap->rxconfig & ANEG_CFG_RF2)
2221                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2222                 if (ap->rxconfig & ANEG_CFG_NP)
2223                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2224
2225                 ap->link_time = ap->cur_time;
2226
2227                 ap->flags ^= (MR_TOGGLE_TX);
2228                 if (ap->rxconfig & 0x0008)
2229                         ap->flags |= MR_TOGGLE_RX;
2230                 if (ap->rxconfig & ANEG_CFG_NP)
2231                         ap->flags |= MR_NP_RX;
2232                 ap->flags |= MR_PAGE_RX;
2233
2234                 ap->state = ANEG_STATE_COMPLETE_ACK;
2235                 ret = ANEG_TIMER_ENAB;
2236                 break;
2237
2238         case ANEG_STATE_COMPLETE_ACK:
2239                 if (ap->ability_match != 0 &&
2240                     ap->rxconfig == 0) {
2241                         ap->state = ANEG_STATE_AN_ENABLE;
2242                         break;
2243                 }
2244                 delta = ap->cur_time - ap->link_time;
2245                 if (delta > ANEG_STATE_SETTLE_TIME) {
2246                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2247                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2248                         } else {
2249                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2250                                     !(ap->flags & MR_NP_RX)) {
2251                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2252                                 } else {
2253                                         ret = ANEG_FAILED;
2254                                 }
2255                         }
2256                 }
2257                 break;
2258
2259         case ANEG_STATE_IDLE_DETECT_INIT:
2260                 ap->link_time = ap->cur_time;
2261                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2262                 tw32_f(MAC_MODE, tp->mac_mode);
2263                 udelay(40);
2264
2265                 ap->state = ANEG_STATE_IDLE_DETECT;
2266                 ret = ANEG_TIMER_ENAB;
2267                 break;
2268
2269         case ANEG_STATE_IDLE_DETECT:
2270                 if (ap->ability_match != 0 &&
2271                     ap->rxconfig == 0) {
2272                         ap->state = ANEG_STATE_AN_ENABLE;
2273                         break;
2274                 }
2275                 delta = ap->cur_time - ap->link_time;
2276                 if (delta > ANEG_STATE_SETTLE_TIME) {
2277                         /* XXX another gem from the Broadcom driver :( */
2278                         ap->state = ANEG_STATE_LINK_OK;
2279                 }
2280                 break;
2281
2282         case ANEG_STATE_LINK_OK:
2283                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2284                 ret = ANEG_DONE;
2285                 break;
2286
2287         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2288                 /* ??? unimplemented */
2289                 break;
2290
2291         case ANEG_STATE_NEXT_PAGE_WAIT:
2292                 /* ??? unimplemented */
2293                 break;
2294
2295         default:
2296                 ret = ANEG_FAILED;
2297                 break;
2298         }
2299
2300         return ret;
2301 }
2302
2303 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2304 {
2305         int res = 0;
2306         struct tg3_fiber_aneginfo aninfo;
2307         int status = ANEG_FAILED;
2308         unsigned int tick;
2309         u32 tmp;
2310
2311         tw32_f(MAC_TX_AUTO_NEG, 0);
2312
2313         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2314         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2315         udelay(40);
2316
2317         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2318         udelay(40);
2319
2320         memset(&aninfo, 0, sizeof(aninfo));
2321         aninfo.flags |= MR_AN_ENABLE;
2322         aninfo.state = ANEG_STATE_UNKNOWN;
2323         aninfo.cur_time = 0;
2324         tick = 0;
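        /* Step the state machine at roughly 1 usec per tick until it
         * reports done/failed or the ~195 msec budget expires.
         */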
2325         while (++tick < 195000) {
2326                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2327                 if (status == ANEG_DONE || status == ANEG_FAILED)
2328                         break;
2329
2330                 udelay(1);
2331         }
2332
2333         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2334         tw32_f(MAC_MODE, tp->mac_mode);
2335         udelay(40);
2336
2337         *flags = aninfo.flags;
2338
2339         if (status == ANEG_DONE &&
2340             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2341                              MR_LP_ADV_FULL_DUPLEX)))
2342                 res = 1;
2343
2344         return res;
2345 }
2346
2347 static void tg3_init_bcm8002(struct tg3 *tp)
2348 {
2349         u32 mac_status = tr32(MAC_STATUS);
2350         int i;
2351
2352         /* Reset when initializing for the first time or when we have a link. */
2353         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2354             !(mac_status & MAC_STATUS_PCS_SYNCED))
2355                 return;
2356
2357         /* Set PLL lock range. */
2358         tg3_writephy(tp, 0x16, 0x8007);
2359
2360         /* SW reset */
2361         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2362
2363         /* Wait for reset to complete. */
2364         /* XXX schedule_timeout() ... */
2365         for (i = 0; i < 500; i++)
2366                 udelay(10);
2367
2368         /* Config mode; select PMA/Ch 1 regs. */
2369         tg3_writephy(tp, 0x10, 0x8411);
2370
2371         /* Enable auto-lock and comdet, select txclk for tx. */
2372         tg3_writephy(tp, 0x11, 0x0a10);
2373
2374         tg3_writephy(tp, 0x18, 0x00a0);
2375         tg3_writephy(tp, 0x16, 0x41ff);
2376
2377         /* Assert and deassert POR. */
2378         tg3_writephy(tp, 0x13, 0x0400);
2379         udelay(40);
2380         tg3_writephy(tp, 0x13, 0x0000);
2381
2382         tg3_writephy(tp, 0x11, 0x0a50);
2383         udelay(40);
2384         tg3_writephy(tp, 0x11, 0x0a10);
2385
2386         /* Wait for signal to stabilize */
2387         /* XXX schedule_timeout() ... */
2388         for (i = 0; i < 15000; i++)
2389                 udelay(10);
2390
2391         /* Deselect the channel register so we can read the PHYID
2392          * later.
2393          */
2394         tg3_writephy(tp, 0x10, 0x8011);
2395 }
2396
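/* Bring up the fiber link using the on-chip SG_DIG hardware autoneg
 * block (or force the link when autoneg is disabled).  Returns 1 if
 * the link is up.
 */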
2397 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2398 {
2399         u32 sg_dig_ctrl, sg_dig_status;
2400         u32 serdes_cfg, expected_sg_dig_ctrl;
2401         int workaround, port_a;
2402         int current_link_up;
2403
2404         serdes_cfg = 0;
2405         expected_sg_dig_ctrl = 0;
2406         workaround = 0;
2407         port_a = 1;
2408         current_link_up = 0;
2409
2410         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2411             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2412                 workaround = 1;
2413                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2414                         port_a = 0;
2415
2416                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2417                 /* preserve bits 20-23 for voltage regulator */
2418                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2419         }
2420
2421         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2422
2423         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2424                 if (sg_dig_ctrl & (1 << 31)) {
2425                         if (workaround) {
2426                                 u32 val = serdes_cfg;
2427
2428                                 if (port_a)
2429                                         val |= 0xc010000;
2430                                 else
2431                                         val |= 0x4010000;
2432                                 tw32_f(MAC_SERDES_CFG, val);
2433                         }
2434                         tw32_f(SG_DIG_CTRL, 0x01388400);
2435                 }
2436                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2437                         tg3_setup_flow_control(tp, 0, 0);
2438                         current_link_up = 1;
2439                 }
2440                 goto out;
2441         }
2442
2443         /* Want auto-negotiation.  */
2444         expected_sg_dig_ctrl = 0x81388400;
2445
2446         /* Pause capability */
2447         expected_sg_dig_ctrl |= (1 << 11);
2448
2449         /* Asymmetric pause */
2450         expected_sg_dig_ctrl |= (1 << 12);
2451
2452         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2453                 if (workaround)
2454                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2455                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2456                 udelay(5);
2457                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2458
2459                 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2460         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2461                                  MAC_STATUS_SIGNAL_DET)) {
2462                 int i;
2463
2464                 /* Give time to negotiate (~200ms) */
2465                 for (i = 0; i < 40000; i++) {
2466                         sg_dig_status = tr32(SG_DIG_STATUS);
2467                         if (sg_dig_status & (0x3))
2468                                 break;
2469                         udelay(5);
2470                 }
2471                 mac_status = tr32(MAC_STATUS);
2472
2473                 if ((sg_dig_status & (1 << 1)) &&
2474                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2475                         u32 local_adv, remote_adv;
2476
2477                         local_adv = ADVERTISE_PAUSE_CAP;
2478                         remote_adv = 0;
2479                         if (sg_dig_status & (1 << 19))
2480                                 remote_adv |= LPA_PAUSE_CAP;
2481                         if (sg_dig_status & (1 << 20))
2482                                 remote_adv |= LPA_PAUSE_ASYM;
2483
2484                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2485                         current_link_up = 1;
2486                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2487                 } else if (!(sg_dig_status & (1 << 1))) {
2488                         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2489                                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2490                         else {
2491                                 if (workaround) {
2492                                         u32 val = serdes_cfg;
2493
2494                                         if (port_a)
2495                                                 val |= 0xc010000;
2496                                         else
2497                                                 val |= 0x4010000;
2498
2499                                         tw32_f(MAC_SERDES_CFG, val);
2500                                 }
2501
2502                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2503                                 udelay(40);
2504
2505                                 /* Link parallel detection - link is up
2506                                  * only if we have PCS_SYNC and are not
2507                                  * receiving config code words.  */
2508                                 mac_status = tr32(MAC_STATUS);
2509                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2510                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2511                                         tg3_setup_flow_control(tp, 0, 0);
2512                                         current_link_up = 1;
2513                                 }
2514                         }
2515                 }
2516         }
2517
2518 out:
2519         return current_link_up;
2520 }
2521
2522 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2523 {
2524         int current_link_up = 0;
2525
2526         if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2527                 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2528                 goto out;
2529         }
2530
2531         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2532                 u32 flags;
2533                 int i;
2534   
2535                 if (fiber_autoneg(tp, &flags)) {
2536                         u32 local_adv, remote_adv;
2537
2538                         local_adv = ADVERTISE_PAUSE_CAP;
2539                         remote_adv = 0;
2540                         if (flags & MR_LP_ADV_SYM_PAUSE)
2541                                 remote_adv |= LPA_PAUSE_CAP;
2542                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2543                                 remote_adv |= LPA_PAUSE_ASYM;
2544
2545                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2546
2547                         tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2548                         current_link_up = 1;
2549                 }
2550                 for (i = 0; i < 30; i++) {
2551                         udelay(20);
2552                         tw32_f(MAC_STATUS,
2553                                (MAC_STATUS_SYNC_CHANGED |
2554                                 MAC_STATUS_CFG_CHANGED));
2555                         udelay(40);
2556                         if ((tr32(MAC_STATUS) &
2557                              (MAC_STATUS_SYNC_CHANGED |
2558                               MAC_STATUS_CFG_CHANGED)) == 0)
2559                                 break;
2560                 }
2561
2562                 mac_status = tr32(MAC_STATUS);
2563                 if (current_link_up == 0 &&
2564                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2565                     !(mac_status & MAC_STATUS_RCVD_CFG))
2566                         current_link_up = 1;
2567         } else {
2568                 /* Forcing 1000FD link up. */
2569                 current_link_up = 1;
2570                 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2571
2572                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2573                 udelay(40);
2574         }
2575
2576 out:
2577         return current_link_up;
2578 }
2579
2580 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2581 {
2582         u32 orig_pause_cfg;
2583         u16 orig_active_speed;
2584         u8 orig_active_duplex;
2585         u32 mac_status;
2586         int current_link_up;
2587         int i;
2588
2589         orig_pause_cfg =
2590                 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2591                                   TG3_FLAG_TX_PAUSE));
2592         orig_active_speed = tp->link_config.active_speed;
2593         orig_active_duplex = tp->link_config.active_duplex;
2594
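        /* When not using hardware autoneg, skip reconfiguration if the
         * link is already up and the SerDes status has not changed.
         */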
2595         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2596             netif_carrier_ok(tp->dev) &&
2597             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2598                 mac_status = tr32(MAC_STATUS);
2599                 mac_status &= (MAC_STATUS_PCS_SYNCED |
2600                                MAC_STATUS_SIGNAL_DET |
2601                                MAC_STATUS_CFG_CHANGED |
2602                                MAC_STATUS_RCVD_CFG);
2603                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2604                                    MAC_STATUS_SIGNAL_DET)) {
2605                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2606                                             MAC_STATUS_CFG_CHANGED));
2607                         return 0;
2608                 }
2609         }
2610
2611         tw32_f(MAC_TX_AUTO_NEG, 0);
2612
2613         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2614         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2615         tw32_f(MAC_MODE, tp->mac_mode);
2616         udelay(40);
2617
2618         if (tp->phy_id == PHY_ID_BCM8002)
2619                 tg3_init_bcm8002(tp);
2620
2621         /* Enable link change event even when serdes polling.  */
2622         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2623         udelay(40);
2624
2625         current_link_up = 0;
2626         mac_status = tr32(MAC_STATUS);
2627
2628         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2629                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2630         else
2631                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2632
2633         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2634         tw32_f(MAC_MODE, tp->mac_mode);
2635         udelay(40);
2636
2637         tp->hw_status->status =
2638                 (SD_STATUS_UPDATED |
2639                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2640
2641         for (i = 0; i < 100; i++) {
2642                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2643                                     MAC_STATUS_CFG_CHANGED));
2644                 udelay(5);
2645                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2646                                          MAC_STATUS_CFG_CHANGED)) == 0)
2647                         break;
2648         }
2649
2650         mac_status = tr32(MAC_STATUS);
2651         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2652                 current_link_up = 0;
2653                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2654                         tw32_f(MAC_MODE, (tp->mac_mode |
2655                                           MAC_MODE_SEND_CONFIGS));
2656                         udelay(1);
2657                         tw32_f(MAC_MODE, tp->mac_mode);
2658                 }
2659         }
2660
2661         if (current_link_up == 1) {
2662                 tp->link_config.active_speed = SPEED_1000;
2663                 tp->link_config.active_duplex = DUPLEX_FULL;
2664                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2665                                     LED_CTRL_LNKLED_OVERRIDE |
2666                                     LED_CTRL_1000MBPS_ON));
2667         } else {
2668                 tp->link_config.active_speed = SPEED_INVALID;
2669                 tp->link_config.active_duplex = DUPLEX_INVALID;
2670                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2671                                     LED_CTRL_LNKLED_OVERRIDE |
2672                                     LED_CTRL_TRAFFIC_OVERRIDE));
2673         }
2674
2675         if (current_link_up != netif_carrier_ok(tp->dev)) {
2676                 if (current_link_up)
2677                         netif_carrier_on(tp->dev);
2678                 else
2679                         netif_carrier_off(tp->dev);
2680                 tg3_link_report(tp);
2681         } else {
2682                 u32 now_pause_cfg =
2683                         tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2684                                          TG3_FLAG_TX_PAUSE);
2685                 if (orig_pause_cfg != now_pause_cfg ||
2686                     orig_active_speed != tp->link_config.active_speed ||
2687                     orig_active_duplex != tp->link_config.active_duplex)
2688                         tg3_link_report(tp);
2689         }
2690
2691         return 0;
2692 }
2693
2694 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2695 {
2696         int current_link_up, err = 0;
2697         u32 bmsr, bmcr;
2698         u16 current_speed;
2699         u8 current_duplex;
2700
2701         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2702         tw32_f(MAC_MODE, tp->mac_mode);
2703         udelay(40);
2704
2705         tw32(MAC_EVENT, 0);
2706
2707         tw32_f(MAC_STATUS,
2708              (MAC_STATUS_SYNC_CHANGED |
2709               MAC_STATUS_CFG_CHANGED |
2710               MAC_STATUS_MI_COMPLETION |
2711               MAC_STATUS_LNKSTATE_CHANGED));
2712         udelay(40);
2713
2714         if (force_reset)
2715                 tg3_phy_reset(tp);
2716
2717         current_link_up = 0;
2718         current_speed = SPEED_INVALID;
2719         current_duplex = DUPLEX_INVALID;
2720
2721         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2722         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2723         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2724                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2725                         bmsr |= BMSR_LSTATUS;
2726                 else
2727                         bmsr &= ~BMSR_LSTATUS;
2728         }
2729
2730         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2731
2732         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2733             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2734                 /* do nothing, just check for link up at the end */
2735         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2736                 u32 adv, new_adv;
2737
2738                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2739                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2740                                   ADVERTISE_1000XPAUSE |
2741                                   ADVERTISE_1000XPSE_ASYM |
2742                                   ADVERTISE_SLCT);
2743
2744                 /* Always advertise symmetric PAUSE just like copper */
2745                 new_adv |= ADVERTISE_1000XPAUSE;
2746
2747                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2748                         new_adv |= ADVERTISE_1000XHALF;
2749                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2750                         new_adv |= ADVERTISE_1000XFULL;
2751
2752                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2753                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2754                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2755                         tg3_writephy(tp, MII_BMCR, bmcr);
2756
2757                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2758                         tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2759                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2760
2761                         return err;
2762                 }
2763         } else {
2764                 u32 new_bmcr;
2765
2766                 bmcr &= ~BMCR_SPEED1000;
2767                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
2768
2769                 if (tp->link_config.duplex == DUPLEX_FULL)
2770                         new_bmcr |= BMCR_FULLDPLX;
2771
2772                 if (new_bmcr != bmcr) {
2773                         /* BMCR_SPEED1000 is a reserved bit that needs
2774                          * to be set on write.
2775                          */
2776                         new_bmcr |= BMCR_SPEED1000;
2777
2778                         /* Force a linkdown */
2779                         if (netif_carrier_ok(tp->dev)) {
2780                                 u32 adv;
2781
2782                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2783                                 adv &= ~(ADVERTISE_1000XFULL |
2784                                          ADVERTISE_1000XHALF |
2785                                          ADVERTISE_SLCT);
2786                                 tg3_writephy(tp, MII_ADVERTISE, adv);
2787                                 tg3_writephy(tp, MII_BMCR, bmcr |
2788                                                            BMCR_ANRESTART |
2789                                                            BMCR_ANENABLE);
2790                                 udelay(10);
2791                                 netif_carrier_off(tp->dev);
2792                         }
2793                         tg3_writephy(tp, MII_BMCR, new_bmcr);
2794                         bmcr = new_bmcr;
2795                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2796                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2797                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2798                             ASIC_REV_5714) {
2799                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2800                                         bmsr |= BMSR_LSTATUS;
2801                                 else
2802                                         bmsr &= ~BMSR_LSTATUS;
2803                         }
2804                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2805                 }
2806         }
2807
2808         if (bmsr & BMSR_LSTATUS) {
2809                 current_speed = SPEED_1000;
2810                 current_link_up = 1;
2811                 if (bmcr & BMCR_FULLDPLX)
2812                         current_duplex = DUPLEX_FULL;
2813                 else
2814                         current_duplex = DUPLEX_HALF;
2815
2816                 if (bmcr & BMCR_ANENABLE) {
2817                         u32 local_adv, remote_adv, common;
2818
2819                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
2820                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
2821                         common = local_adv & remote_adv;
2822                         if (common & (ADVERTISE_1000XHALF |
2823                                       ADVERTISE_1000XFULL)) {
2824                                 if (common & ADVERTISE_1000XFULL)
2825                                         current_duplex = DUPLEX_FULL;
2826                                 else
2827                                         current_duplex = DUPLEX_HALF;
2828
2829                                 tg3_setup_flow_control(tp, local_adv,
2830                                                        remote_adv);
2831                         }
2832                         else
2833                                 current_link_up = 0;
2834                 }
2835         }
2836
2837         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2838         if (tp->link_config.active_duplex == DUPLEX_HALF)
2839                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2840
2841         tw32_f(MAC_MODE, tp->mac_mode);
2842         udelay(40);
2843
2844         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2845
2846         tp->link_config.active_speed = current_speed;
2847         tp->link_config.active_duplex = current_duplex;
2848
2849         if (current_link_up != netif_carrier_ok(tp->dev)) {
2850                 if (current_link_up)
2851                         netif_carrier_on(tp->dev);
2852                 else {
2853                         netif_carrier_off(tp->dev);
2854                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2855                 }
2856                 tg3_link_report(tp);
2857         }
2858         return err;
2859 }
2860
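/* Parallel detection note: with the link down and autoneg enabled, the
 * helper below checks the PHY's signal-detect status against whether
 * config code words are being received.  Signal present but no config
 * code words means the partner is not autonegotiating, so the link is
 * forced to 1000/full and TG3_FLG2_PARALLEL_DETECT is set; once config
 * code words reappear, autoneg is turned back on.
 */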
2861 static void tg3_serdes_parallel_detect(struct tg3 *tp)
2862 {
2863         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED) {
2864                 /* Give autoneg time to complete. */
2865                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2866                 return;
2867         }
2868         if (!netif_carrier_ok(tp->dev) &&
2869             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2870                 u32 bmcr;
2871
2872                 tg3_readphy(tp, MII_BMCR, &bmcr);
2873                 if (bmcr & BMCR_ANENABLE) {
2874                         u32 phy1, phy2;
2875
2876                         /* Select shadow register 0x1f */
2877                         tg3_writephy(tp, 0x1c, 0x7c00);
2878                         tg3_readphy(tp, 0x1c, &phy1);
2879
2880                         /* Select expansion interrupt status register */
2881                         tg3_writephy(tp, 0x17, 0x0f01);
2882                         tg3_readphy(tp, 0x15, &phy2);
2883                         tg3_readphy(tp, 0x15, &phy2);
2884
2885                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
2886                                 /* We have signal detect and are not receiving
2887                                  * config code words; link is up by parallel
2888                                  * detection.
2889                                  */
2890
2891                                 bmcr &= ~BMCR_ANENABLE;
2892                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
2893                                 tg3_writephy(tp, MII_BMCR, bmcr);
2894                                 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
2895                         }
2896                 }
2897         }
2898         else if (netif_carrier_ok(tp->dev) &&
2899                  (tp->link_config.autoneg == AUTONEG_ENABLE) &&
2900                  (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2901                 u32 phy2;
2902
2903                 /* Select expansion interrupt status register */
2904                 tg3_writephy(tp, 0x17, 0x0f01);
2905                 tg3_readphy(tp, 0x15, &phy2);
2906                 if (phy2 & 0x20) {
2907                         u32 bmcr;
2908
2909                         /* Config code words received, turn on autoneg. */
2910                         tg3_readphy(tp, MII_BMCR, &bmcr);
2911                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
2912
2913                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2914
2915                 }
2916         }
2917 }
2918
2919 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2920 {
2921         int err;
2922
2923         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2924                 err = tg3_setup_fiber_phy(tp, force_reset);
2925         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2926                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
2927         } else {
2928                 err = tg3_setup_copper_phy(tp, force_reset);
2929         }
2930
2931         if (tp->link_config.active_speed == SPEED_1000 &&
2932             tp->link_config.active_duplex == DUPLEX_HALF)
2933                 tw32(MAC_TX_LENGTHS,
2934                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2935                       (6 << TX_LENGTHS_IPG_SHIFT) |
2936                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2937         else
2938                 tw32(MAC_TX_LENGTHS,
2939                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2940                       (6 << TX_LENGTHS_IPG_SHIFT) |
2941                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2942
2943         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2944                 if (netif_carrier_ok(tp->dev)) {
2945                         tw32(HOSTCC_STAT_COAL_TICKS,
2946                              tp->coal.stats_block_coalesce_usecs);
2947                 } else {
2948                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2949                 }
2950         }
2951
2952         return err;
2953 }
2954
2955 /* Tigon3 never reports partial packet sends.  So we do not
2956  * need special logic to handle SKBs that have not had all
2957  * of their frags sent yet, like SunGEM does.
2958  */
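/* Completion note: tg3_tx() walks tp->tx_cons toward the hardware
 * consumer index from the status block, unmapping the head and every
 * fragment of each completed skb.  The queue is only re-awakened with
 * tp->tx_lock held, re-checking netif_queue_stopped() to avoid racing
 * with the transmit path.
 */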
2959 static void tg3_tx(struct tg3 *tp)
2960 {
2961         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2962         u32 sw_idx = tp->tx_cons;
2963
2964         while (sw_idx != hw_idx) {
2965                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2966                 struct sk_buff *skb = ri->skb;
2967                 int i;
2968
2969                 if (unlikely(skb == NULL))
2970                         BUG();
2971
2972                 pci_unmap_single(tp->pdev,
2973                                  pci_unmap_addr(ri, mapping),
2974                                  skb_headlen(skb),
2975                                  PCI_DMA_TODEVICE);
2976
2977                 ri->skb = NULL;
2978
2979                 sw_idx = NEXT_TX(sw_idx);
2980
2981                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2982                         if (unlikely(sw_idx == hw_idx))
2983                                 BUG();
2984
2985                         ri = &tp->tx_buffers[sw_idx];
2986                         if (unlikely(ri->skb != NULL))
2987                                 BUG();
2988
2989                         pci_unmap_page(tp->pdev,
2990                                        pci_unmap_addr(ri, mapping),
2991                                        skb_shinfo(skb)->frags[i].size,
2992                                        PCI_DMA_TODEVICE);
2993
2994                         sw_idx = NEXT_TX(sw_idx);
2995                 }
2996
2997                 dev_kfree_skb(skb);
2998         }
2999
3000         tp->tx_cons = sw_idx;
3001
3002         if (unlikely(netif_queue_stopped(tp->dev))) {
3003                 spin_lock(&tp->tx_lock);
3004                 if (netif_queue_stopped(tp->dev) &&
3005                     (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
3006                         netif_wake_queue(tp->dev);
3007                 spin_unlock(&tp->tx_lock);
3008         }
3009 }
3010
3011 /* Returns size of skb allocated or < 0 on error.
3012  *
3013  * We only need to fill in the address because the other members
3014  * of the RX descriptor are invariant, see tg3_init_rings.
3015  *
3016  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
3017  * posting buffers we only dirty the first cache line of the RX
3018  * descriptor (containing the address).  Whereas for the RX status
3019  * buffers the cpu only reads the last cacheline of the RX descriptor
3020  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3021  */
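/* As an illustrative sketch of the above (not additional driver code):
 * a post only touches the two address words of a descriptor whose other
 * fields were filled in once by tg3_init_rings():
 *
 *      desc->addr_hi = ((u64)mapping >> 32);
 *      desc->addr_lo = ((u64)mapping & 0xffffffff);
 *
 * idx_len, type_flags and opaque are left untouched.
 */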
3022 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3023                             int src_idx, u32 dest_idx_unmasked)
3024 {
3025         struct tg3_rx_buffer_desc *desc;
3026         struct ring_info *map, *src_map;
3027         struct sk_buff *skb;
3028         dma_addr_t mapping;
3029         int skb_size, dest_idx;
3030
3031         src_map = NULL;
3032         switch (opaque_key) {
3033         case RXD_OPAQUE_RING_STD:
3034                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3035                 desc = &tp->rx_std[dest_idx];
3036                 map = &tp->rx_std_buffers[dest_idx];
3037                 if (src_idx >= 0)
3038                         src_map = &tp->rx_std_buffers[src_idx];
3039                 skb_size = tp->rx_pkt_buf_sz;
3040                 break;
3041
3042         case RXD_OPAQUE_RING_JUMBO:
3043                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3044                 desc = &tp->rx_jumbo[dest_idx];
3045                 map = &tp->rx_jumbo_buffers[dest_idx];
3046                 if (src_idx >= 0)
3047                         src_map = &tp->rx_jumbo_buffers[src_idx];
3048                 skb_size = RX_JUMBO_PKT_BUF_SZ;
3049                 break;
3050
3051         default:
3052                 return -EINVAL;
3053         }
3054
3055         /* Do not overwrite any of the map or rp information
3056          * until we are sure we can commit to a new buffer.
3057          *
3058          * Callers depend upon this behavior and assume that
3059          * we leave everything unchanged if we fail.
3060          */
3061         skb = dev_alloc_skb(skb_size);
3062         if (skb == NULL)
3063                 return -ENOMEM;
3064
3065         skb->dev = tp->dev;
3066         skb_reserve(skb, tp->rx_offset);
3067
3068         mapping = pci_map_single(tp->pdev, skb->data,
3069                                  skb_size - tp->rx_offset,
3070                                  PCI_DMA_FROMDEVICE);
3071
3072         map->skb = skb;
3073         pci_unmap_addr_set(map, mapping, mapping);
3074
3075         if (src_map != NULL)
3076                 src_map->skb = NULL;
3077
3078         desc->addr_hi = ((u64)mapping >> 32);
3079         desc->addr_lo = ((u64)mapping & 0xffffffff);
3080
3081         return skb_size;
3082 }
3083
3084 /* We only need to move the address over because the other
3085  * members of the RX descriptor are invariant.  See notes above
3086  * tg3_alloc_rx_skb for full details.
3087  */
3088 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3089                            int src_idx, u32 dest_idx_unmasked)
3090 {
3091         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3092         struct ring_info *src_map, *dest_map;
3093         int dest_idx;
3094
3095         switch (opaque_key) {
3096         case RXD_OPAQUE_RING_STD:
3097                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3098                 dest_desc = &tp->rx_std[dest_idx];
3099                 dest_map = &tp->rx_std_buffers[dest_idx];
3100                 src_desc = &tp->rx_std[src_idx];
3101                 src_map = &tp->rx_std_buffers[src_idx];
3102                 break;
3103
3104         case RXD_OPAQUE_RING_JUMBO:
3105                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3106                 dest_desc = &tp->rx_jumbo[dest_idx];
3107                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3108                 src_desc = &tp->rx_jumbo[src_idx];
3109                 src_map = &tp->rx_jumbo_buffers[src_idx];
3110                 break;
3111
3112         default:
3113                 return;
3114         }
3115
3116         dest_map->skb = src_map->skb;
3117         pci_unmap_addr_set(dest_map, mapping,
3118                            pci_unmap_addr(src_map, mapping));
3119         dest_desc->addr_hi = src_desc->addr_hi;
3120         dest_desc->addr_lo = src_desc->addr_lo;
3121
3122         src_map->skb = NULL;
3123 }
3124
3125 #if TG3_VLAN_TAG_USED
3126 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
3127 {
3128         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
3129 }
3130 #endif
3131
3132 /* The RX ring scheme is composed of multiple rings which post fresh
3133  * buffers to the chip, and one special ring the chip uses to report
3134  * status back to the host.
3135  *
3136  * The special ring reports the status of received packets to the
3137  * host.  The chip does not write into the original descriptor the
3138  * RX buffer was obtained from.  The chip simply takes the original
3139  * descriptor as provided by the host, updates the status and length
3140  * field, then writes this into the next status ring entry.
3141  *
3142  * Each ring the host uses to post buffers to the chip is described
3143  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
3144  * it is first placed into on-chip RAM.  When the packet's length is
3145  * known, the chip walks down the TG3_BDINFO entries to select the ring.
3146  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
3147  * whose range covers the new packet's length is chosen.
3148  *
3149  * The "separate ring for rx status" scheme may sound queer, but it makes
3150  * sense from a cache coherency perspective.  If only the host writes
3151  * to the buffer post rings, and only the chip writes to the rx status
3152  * rings, then cache lines never move beyond shared-modified state.
3153  * If both the host and chip were to write into the same ring, cache line
3154  * eviction could occur since both entities want it in an exclusive state.
3155  */
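/* Concretely, as used by tg3_rx() below: consumed status ring entries
 * are acked through MAILBOX_RCVRET_CON_IDX_0, and fresh buffers are
 * republished by bumping MAILBOX_RCV_STD_PROD_IDX (standard ring) and
 * MAILBOX_RCV_JUMBO_PROD_IDX (jumbo ring).
 */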
3156 static int tg3_rx(struct tg3 *tp, int budget)
3157 {
3158         u32 work_mask;
3159         u32 sw_idx = tp->rx_rcb_ptr;
3160         u16 hw_idx;
3161         int received;
3162
3163         hw_idx = tp->hw_status->idx[0].rx_producer;
3164         /*
3165          * We need to order the read of hw_idx and the read of
3166          * the opaque cookie.
3167          */
3168         rmb();
3169         work_mask = 0;
3170         received = 0;
3171         while (sw_idx != hw_idx && budget > 0) {
3172                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
3173                 unsigned int len;
3174                 struct sk_buff *skb;
3175                 dma_addr_t dma_addr;
3176                 u32 opaque_key, desc_idx, *post_ptr;
3177
3178                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
3179                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
3180                 if (opaque_key == RXD_OPAQUE_RING_STD) {
3181                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3182                                                   mapping);
3183                         skb = tp->rx_std_buffers[desc_idx].skb;
3184                         post_ptr = &tp->rx_std_ptr;
3185                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3186                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3187                                                   mapping);
3188                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
3189                         post_ptr = &tp->rx_jumbo_ptr;
3190                 }
3191                 else {
3192                         goto next_pkt_nopost;
3193                 }
3194
3195                 work_mask |= opaque_key;
3196
3197                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3198                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3199                 drop_it:
3200                         tg3_recycle_rx(tp, opaque_key,
3201                                        desc_idx, *post_ptr);
3202                 drop_it_no_recycle:
3203                         /* Other statistics kept track of by card. */
3204                         tp->net_stats.rx_dropped++;
3205                         goto next_pkt;
3206                 }
3207
3208                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3209
3210                 if (len > RX_COPY_THRESHOLD &&
3211                     tp->rx_offset == 2) {
3212                         /* rx_offset != 2 iff this is a 5701 card running
3213                          * in PCI-X mode [see tg3_get_invariants()]
3214                          */
3215                         int skb_size;
3216
3217                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3218                                                     desc_idx, *post_ptr);
3219                         if (skb_size < 0)
3220                                 goto drop_it;
3221
3222                         pci_unmap_single(tp->pdev, dma_addr,
3223                                          skb_size - tp->rx_offset,
3224                                          PCI_DMA_FROMDEVICE);
3225
3226                         skb_put(skb, len);
3227                 } else {
3228                         struct sk_buff *copy_skb;
3229
3230                         tg3_recycle_rx(tp, opaque_key,
3231                                        desc_idx, *post_ptr);
3232
3233                         copy_skb = dev_alloc_skb(len + 2);
3234                         if (copy_skb == NULL)
3235                                 goto drop_it_no_recycle;
3236
3237                         copy_skb->dev = tp->dev;
3238                         skb_reserve(copy_skb, 2);
3239                         skb_put(copy_skb, len);
3240                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3241                         memcpy(copy_skb->data, skb->data, len);
3242                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3243
3244                         /* We'll reuse the original ring buffer. */
3245                         skb = copy_skb;
3246                 }
3247
3248                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3249                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3250                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3251                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
3252                         skb->ip_summed = CHECKSUM_UNNECESSARY;
3253                 else
3254                         skb->ip_summed = CHECKSUM_NONE;
3255
3256                 skb->protocol = eth_type_trans(skb, tp->dev);
3257 #if TG3_VLAN_TAG_USED
3258                 if (tp->vlgrp != NULL &&
3259                     desc->type_flags & RXD_FLAG_VLAN) {
3260                         tg3_vlan_rx(tp, skb,
3261                                     desc->err_vlan & RXD_VLAN_MASK);
3262                 } else
3263 #endif
3264                         netif_receive_skb(skb);
3265
3266                 tp->dev->last_rx = jiffies;
3267                 received++;
3268                 budget--;
3269
3270 next_pkt:
3271                 (*post_ptr)++;
3272 next_pkt_nopost:
3273                 sw_idx++;
3274                 sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
3275
3276                 /* Refresh hw_idx to see if there is new work */
3277                 if (sw_idx == hw_idx) {
3278                         hw_idx = tp->hw_status->idx[0].rx_producer;
3279                         rmb();
3280                 }
3281         }
3282
3283         /* ACK the status ring. */
3284         tp->rx_rcb_ptr = sw_idx;
3285         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
3286
3287         /* Refill RX ring(s). */
3288         if (work_mask & RXD_OPAQUE_RING_STD) {
3289                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3290                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3291                              sw_idx);
3292         }
3293         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3294                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3295                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3296                              sw_idx);
3297         }
3298         mmiowb();
3299
3300         return received;
3301 }
3302
3303 static int tg3_poll(struct net_device *netdev, int *budget)
3304 {
3305         struct tg3 *tp = netdev_priv(netdev);
3306         struct tg3_hw_status *sblk = tp->hw_status;
3307         int done;
3308
3309         /* handle link change and other phy events */
3310         if (!(tp->tg3_flags &
3311               (TG3_FLAG_USE_LINKCHG_REG |
3312                TG3_FLAG_POLL_SERDES))) {
3313                 if (sblk->status & SD_STATUS_LINK_CHG) {
3314                         sblk->status = SD_STATUS_UPDATED |
3315                                 (sblk->status & ~SD_STATUS_LINK_CHG);
3316                         spin_lock(&tp->lock);
3317                         tg3_setup_phy(tp, 0);
3318                         spin_unlock(&tp->lock);
3319                 }
3320         }
3321
3322         /* run TX completion thread */
3323         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
3324                 tg3_tx(tp);
3325         }
3326
3327         /* run RX thread, within the bounds set by NAPI.
3328          * All RX "locking" is done by ensuring outside
3329          * code synchronizes with dev->poll()
3330          */
3331         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
3332                 int orig_budget = *budget;
3333                 int work_done;
3334
3335                 if (orig_budget > netdev->quota)
3336                         orig_budget = netdev->quota;
3337
3338                 work_done = tg3_rx(tp, orig_budget);
3339
3340                 *budget -= work_done;
3341                 netdev->quota -= work_done;
3342         }
3343
3344         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
3345                 tp->last_tag = sblk->status_tag;
3346                 rmb();
3347         } else
3348                 sblk->status &= ~SD_STATUS_UPDATED;
3349
3350         /* if no more work, tell net stack and NIC we're done */
3351         done = !tg3_has_work(tp);
3352         if (done) {
3353                 netif_rx_complete(netdev);
3354                 tg3_restart_ints(tp);
3355         }
3356
3357         return (done ? 0 : 1);
3358 }
3359
3360 static void tg3_irq_quiesce(struct tg3 *tp)
3361 {
3362         BUG_ON(tp->irq_sync);
3363
3364         tp->irq_sync = 1;
3365         smp_mb();
3366
3367         synchronize_irq(tp->pdev->irq);
3368 }
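/* Note: tp->irq_sync is set (with a barrier) before synchronize_irq()
 * so that any handler that fires from here on sees tg3_irq_sync() as
 * true and refrains from scheduling NAPI work.
 */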
3369
3370 static inline int tg3_irq_sync(struct tg3 *tp)
3371 {
3372         return tp->irq_sync;
3373 }
3374
3375 /* Fully shut down all tg3 driver activity elsewhere in the system.
3376  * If irq_sync is non-zero, the IRQ handler is quiesced as well.
3377  * Most of the time this is not necessary, except when shutting
3378  * down the device.
3379  */
3380 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3381 {
3382         if (irq_sync)
3383                 tg3_irq_quiesce(tp);
3384         spin_lock_bh(&tp->lock);
3385         spin_lock(&tp->tx_lock);
3386 }
3387
3388 static inline void tg3_full_unlock(struct tg3 *tp)
3389 {
3390         spin_unlock(&tp->tx_lock);
3391         spin_unlock_bh(&tp->lock);
3392 }
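/* Typical usage, as in tg3_reset_task() and tg3_change_mtu() below
 * (an illustrative sketch, not additional driver code); passing 1 as
 * the second argument also quiesces the IRQ handler first:
 *
 *      tg3_full_lock(tp, 1);
 *      tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
 *      tg3_init_hw(tp);
 *      tg3_full_unlock(tp);
 */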
3393
3394 /* One-shot MSI handler - Chip automatically disables interrupt
3395  * after sending MSI so driver doesn't have to do it.
3396  */
3397 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id, struct pt_regs *regs)
3398 {
3399         struct net_device *dev = dev_id;
3400         struct tg3 *tp = netdev_priv(dev);
3401
3402         prefetch(tp->hw_status);
3403         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3404
3405         if (likely(!tg3_irq_sync(tp)))
3406                 netif_rx_schedule(dev);         /* schedule NAPI poll */
3407
3408         return IRQ_HANDLED;
3409 }
3410
3411 /* MSI ISR - No need to check for interrupt sharing and no need to
3412  * flush status block and interrupt mailbox. PCI ordering rules
3413  * guarantee that MSI will arrive after the status block.
3414  */
3415 static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
3416 {
3417         struct net_device *dev = dev_id;
3418         struct tg3 *tp = netdev_priv(dev);
3419
3420         prefetch(tp->hw_status);
3421         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3422         /*
3423          * Writing any value to intr-mbox-0 clears PCI INTA# and
3424          * chip-internal interrupt pending events.
3425          * Writing non-zero to intr-mbox-0 additionally tells the
3426          * NIC to stop sending us irqs, engaging "in-intr-handler"
3427          * event coalescing.
3428          */
3429         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3430         if (likely(!tg3_irq_sync(tp)))
3431                 netif_rx_schedule(dev);         /* schedule NAPI poll */
3432
3433         return IRQ_RETVAL(1);
3434 }
3435
3436 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
3437 {
3438         struct net_device *dev = dev_id;
3439         struct tg3 *tp = netdev_priv(dev);
3440         struct tg3_hw_status *sblk = tp->hw_status;
3441         unsigned int handled = 1;
3442
3443         /* In INTx mode, it is possible for the interrupt to arrive at
3444          * the CPU before the status block write that preceded it is visible.
3445          * Reading the PCI State register will confirm whether the
3446          * interrupt is ours and will flush the status block.
3447          */
3448         if ((sblk->status & SD_STATUS_UPDATED) ||
3449             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3450                 /*
3451                  * Writing any value to intr-mbox-0 clears PCI INTA# and
3452                  * chip-internal interrupt pending events.
3453                  * Writing non-zero to intr-mbox-0 additionally tells the
3454                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3455                  * event coalescing.
3456                  */
3457                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3458                              0x00000001);
3459                 if (tg3_irq_sync(tp))
3460                         goto out;
3461                 sblk->status &= ~SD_STATUS_UPDATED;
3462                 if (likely(tg3_has_work(tp))) {
3463                         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3464                         netif_rx_schedule(dev);         /* schedule NAPI poll */
3465                 } else {
3466                         /* No work, shared interrupt perhaps?  re-enable
3467                          * interrupts, and flush that PCI write
3468                          */
3469                         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3470                                 0x00000000);
3471                 }
3472         } else {        /* shared interrupt */
3473                 handled = 0;
3474         }
3475 out:
3476         return IRQ_RETVAL(handled);
3477 }
3478
3479 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
3480 {
3481         struct net_device *dev = dev_id;
3482         struct tg3 *tp = netdev_priv(dev);
3483         struct tg3_hw_status *sblk = tp->hw_status;
3484         unsigned int handled = 1;
3485
3486         /* In INTx mode, it is possible for the interrupt to arrive at
3487          * the CPU before the status block write that preceded it is visible.
3488          * Reading the PCI State register will confirm whether the
3489          * interrupt is ours and will flush the status block.
3490          */
3491         if ((sblk->status_tag != tp->last_tag) ||
3492             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3493                 /*
3494                  * writing any value to intr-mbox-0 clears PCI INTA# and
3495                  * chip-internal interrupt pending events.
3496                  * writing non-zero to intr-mbox-0 additionally tells the
3497                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3498                  * event coalescing.
3499                  */
3500                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3501                              0x00000001);
3502                 if (tg3_irq_sync(tp))
3503                         goto out;
3504                 if (netif_rx_schedule_prep(dev)) {
3505                         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3506                         /* Update last_tag to mark that this status has been
3507                          * seen. Because interrupt may be shared, we may be
3508                          * racing with tg3_poll(), so only update last_tag
3509                          * if tg3_poll() is not scheduled.
3510                          */
3511                         tp->last_tag = sblk->status_tag;
3512                         __netif_rx_schedule(dev);
3513                 }
3514         } else {        /* shared interrupt */
3515                 handled = 0;
3516         }
3517 out:
3518         return IRQ_RETVAL(handled);
3519 }
3520
3521 /* ISR for interrupt test */
3522 static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3523                 struct pt_regs *regs)
3524 {
3525         struct net_device *dev = dev_id;
3526         struct tg3 *tp = netdev_priv(dev);
3527         struct tg3_hw_status *sblk = tp->hw_status;
3528
3529         if ((sblk->status & SD_STATUS_UPDATED) ||
3530             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3531                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3532                              0x00000001);
3533                 return IRQ_RETVAL(1);
3534         }
3535         return IRQ_RETVAL(0);
3536 }
3537
3538 static int tg3_init_hw(struct tg3 *);
3539 static int tg3_halt(struct tg3 *, int, int);
3540
3541 #ifdef CONFIG_NET_POLL_CONTROLLER
3542 static void tg3_poll_controller(struct net_device *dev)
3543 {
3544         struct tg3 *tp = netdev_priv(dev);
3545
3546         tg3_interrupt(tp->pdev->irq, dev, NULL);
3547 }
3548 #endif
3549
3550 static void tg3_reset_task(void *_data)
3551 {
3552         struct tg3 *tp = _data;
3553         unsigned int restart_timer;
3554
3555         tg3_full_lock(tp, 0);
3556         tp->tg3_flags |= TG3_FLAG_IN_RESET_TASK;
3557
3558         if (!netif_running(tp->dev)) {
3559                 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3560                 tg3_full_unlock(tp);
3561                 return;
3562         }
3563
3564         tg3_full_unlock(tp);
3565
3566         tg3_netif_stop(tp);
3567
3568         tg3_full_lock(tp, 1);
3569
3570         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3571         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3572
3573         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
3574         tg3_init_hw(tp);
3575
3576         tg3_netif_start(tp);
3577
3578         if (restart_timer)
3579                 mod_timer(&tp->timer, jiffies + 1);
3580
3581         tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3582
3583         tg3_full_unlock(tp);
3584 }
3585
3586 static void tg3_tx_timeout(struct net_device *dev)
3587 {
3588         struct tg3 *tp = netdev_priv(dev);
3589
3590         printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3591                dev->name);
3592
3593         schedule_work(&tp->reset_task);
3594 }
3595
3596 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
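/* Illustrative example (not driver code): with mapping = 0xfffff000 and
 * len = 0x2000, base + len + 8 wraps past 2^32 and compares less than
 * base, so the test fires and the transmit path hands the packet to
 * tigon3_dma_hwbug_workaround().  The base > 0xffffdcc0 check restricts
 * the test to mappings that start in the last ~9KB below a 4GB boundary,
 * presumably enough headroom for the largest buffer the driver maps.
 */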
3597 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3598 {
3599         u32 base = (u32) mapping & 0xffffffff;
3600
3601         return ((base > 0xffffdcc0) &&
3602                 (base + len + 8 < base));
3603 }
3604
3605 /* Test for DMA addresses > 40-bit */
3606 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
3607                                           int len)
3608 {
3609 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
3610         if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
3611                 return (((u64) mapping + len) > DMA_40BIT_MASK);
3612         return 0;
3613 #else
3614         return 0;
3615 #endif
3616 }
3617
3618 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3619
3620 /* Workaround 4GB and 40-bit hardware DMA bugs. */
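/* The approach: linearize the offending skb with skb_copy(), map the
 * copy, and verify that the new mapping does not itself cross a 4GB
 * boundary (the packet is dropped if it does).  The original mappings
 * are then unmapped, the first sw ring entry is repointed at the copy,
 * and the original skb is freed.
 */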
3621 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3622                                        u32 last_plus_one, u32 *start,
3623                                        u32 base_flags, u32 mss)
3624 {
3625         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3626         dma_addr_t new_addr = 0;
3627         u32 entry = *start;
3628         int i, ret = 0;
3629
3630         if (!new_skb) {
3631                 ret = -1;
3632         } else {
3633                 /* New SKB is guaranteed to be linear. */
3634                 entry = *start;
3635                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3636                                           PCI_DMA_TODEVICE);
3637                 /* Make sure new skb does not cross any 4G boundaries.
3638                  * Drop the packet if it does.
3639                  */
3640                 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
3641                         ret = -1;
3642                         dev_kfree_skb(new_skb);
3643                         new_skb = NULL;
3644                 } else {
3645                         tg3_set_txd(tp, entry, new_addr, new_skb->len,
3646                                     base_flags, 1 | (mss << 1));
3647                         *start = NEXT_TX(entry);
3648                 }
3649         }
3650
3651         /* Now clean up the sw ring entries. */
3652         i = 0;
3653         while (entry != last_plus_one) {
3654                 int len;
3655
3656                 if (i == 0)
3657                         len = skb_headlen(skb);
3658                 else
3659                         len = skb_shinfo(skb)->frags[i-1].size;
3660                 pci_unmap_single(tp->pdev,
3661                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3662                                  len, PCI_DMA_TODEVICE);
3663                 if (i == 0) {
3664                         tp->tx_buffers[entry].skb = new_skb;
3665                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3666                 } else {
3667                         tp->tx_buffers[entry].skb = NULL;
3668                 }
3669                 entry = NEXT_TX(entry);
3670                 i++;
3671         }
3672
3673         dev_kfree_skb(skb);
3674
3675         return ret;
3676 }
3677
3678 static void tg3_set_txd(struct tg3 *tp, int entry,
3679                         dma_addr_t mapping, int len, u32 flags,
3680                         u32 mss_and_is_end)
3681 {
3682         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3683         int is_end = (mss_and_is_end & 0x1);
3684         u32 mss = (mss_and_is_end >> 1);
3685         u32 vlan_tag = 0;
3686
3687         if (is_end)
3688                 flags |= TXD_FLAG_END;
3689         if (flags & TXD_FLAG_VLAN) {
3690                 vlan_tag = flags >> 16;
3691                 flags &= 0xffff;
3692         }
3693         vlan_tag |= (mss << TXD_MSS_SHIFT);
3694
3695         txd->addr_hi = ((u64) mapping >> 32);
3696         txd->addr_lo = ((u64) mapping & 0xffffffff);
3697         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3698         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3699 }
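/* Callers pack the final argument as in this sketch (taken from the
 * transmit paths below):
 *
 *      tg3_set_txd(tp, entry, mapping, len, base_flags,
 *                  (i == last) | (mss << 1));
 *
 * Bit 0 marks the last descriptor of the packet (TXD_FLAG_END) and the
 * remaining bits carry the MSS for TSO.
 */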
3700
3701 /* hard_start_xmit for devices that don't have any bugs and
3702  * support TG3_FLG2_HW_TSO_2 only.
3703  */
3704 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3705 {
3706         struct tg3 *tp = netdev_priv(dev);
3707         dma_addr_t mapping;
3708         u32 len, entry, base_flags, mss;
3709
3710         len = skb_headlen(skb);
3711
3712         /* No BH disabling for tx_lock here.  We are running in BH disabled
3713          * context and TX reclaim runs via tp->poll inside of a software
3714          * interrupt.  Furthermore, IRQ processing runs lockless so we have
3715          * no IRQ context deadlocks to worry about either.  Rejoice!
3716          */
3717         if (!spin_trylock(&tp->tx_lock))
3718                 return NETDEV_TX_LOCKED;
3719
3720         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3721                 if (!netif_queue_stopped(dev)) {
3722                         netif_stop_queue(dev);
3723
3724                         /* This is a hard error, log it. */
3725                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3726                                "queue awake!\n", dev->name);
3727                 }
3728                 spin_unlock(&tp->tx_lock);
3729                 return NETDEV_TX_BUSY;
3730         }
3731
3732         entry = tp->tx_prod;
3733         base_flags = 0;
3734 #if TG3_TSO_SUPPORT != 0
3735         mss = 0;
3736         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3737             (mss = skb_shinfo(skb)->tso_size) != 0) {
3738                 int tcp_opt_len, ip_tcp_len;
3739
3740                 if (skb_header_cloned(skb) &&
3741                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3742                         dev_kfree_skb(skb);
3743                         goto out_unlock;
3744                 }
3745
3746                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3747                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3748
3749                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3750                                TXD_FLAG_CPU_POST_DMA);
3751
3752                 skb->nh.iph->check = 0;
3753                 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
3754
3755                 skb->h.th->check = 0;
3756
3757                 mss |= (ip_tcp_len + tcp_opt_len) << 9;
3758         }
3759         else if (skb->ip_summed == CHECKSUM_HW)
3760                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3761 #else
3762         mss = 0;
3763         if (skb->ip_summed == CHECKSUM_HW)
3764                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3765 #endif
3766 #if TG3_VLAN_TAG_USED
3767         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3768                 base_flags |= (TXD_FLAG_VLAN |
3769                                (vlan_tx_tag_get(skb) << 16));
3770 #endif
3771
3772         /* Queue skb data, a.k.a. the main skb fragment. */
3773         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3774
3775         tp->tx_buffers[entry].skb = skb;
3776         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3777
3778         tg3_set_txd(tp, entry, mapping, len, base_flags,
3779                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3780
3781         entry = NEXT_TX(entry);
3782
3783         /* Now loop through additional data fragments, and queue them. */
3784         if (skb_shinfo(skb)->nr_frags > 0) {
3785                 unsigned int i, last;
3786
3787                 last = skb_shinfo(skb)->nr_frags - 1;
3788                 for (i = 0; i <= last; i++) {
3789                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3790
3791                         len = frag->size;
3792                         mapping = pci_map_page(tp->pdev,
3793                                                frag->page,
3794                                                frag->page_offset,
3795                                                len, PCI_DMA_TODEVICE);
3796
3797                         tp->tx_buffers[entry].skb = NULL;
3798                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3799
3800                         tg3_set_txd(tp, entry, mapping, len,
3801                                     base_flags, (i == last) | (mss << 1));
3802
3803                         entry = NEXT_TX(entry);
3804                 }
3805         }
3806
3807         /* Packets are ready, update Tx producer idx local and on card. */
3808         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3809
3810         tp->tx_prod = entry;
3811         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
3812                 netif_stop_queue(dev);
3813                 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
3814                         netif_wake_queue(tp->dev);
3815         }
3816
3817 out_unlock:
3818         mmiowb();
3819         spin_unlock(&tp->tx_lock);
3820
3821         dev->trans_start = jiffies;
3822
3823         return NETDEV_TX_OK;
3824 }
3825
3826 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
3827  * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
3828  */
3829 static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
3830 {
3831         struct tg3 *tp = netdev_priv(dev);
3832         dma_addr_t mapping;
3833         u32 len, entry, base_flags, mss;
3834         int would_hit_hwbug;
3835
3836         len = skb_headlen(skb);
3837
3838         /* No BH disabling for tx_lock here.  We are running in BH disabled
3839          * context and TX reclaim runs via tp->poll inside of a software
3840          * interrupt.  Furthermore, IRQ processing runs lockless so we have
3841          * no IRQ context deadlocks to worry about either.  Rejoice!
3842          */
3843         if (!spin_trylock(&tp->tx_lock))
3844                 return NETDEV_TX_LOCKED; 
3845
3846         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3847                 if (!netif_queue_stopped(dev)) {
3848                         netif_stop_queue(dev);
3849
3850                         /* This is a hard error, log it. */
3851                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3852                                "queue awake!\n", dev->name);
3853                 }
3854                 spin_unlock(&tp->tx_lock);
3855                 return NETDEV_TX_BUSY;
3856         }
3857
3858         entry = tp->tx_prod;
3859         base_flags = 0;
3860         if (skb->ip_summed == CHECKSUM_HW)
3861                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3862 #if TG3_TSO_SUPPORT != 0
3863         mss = 0;
3864         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3865             (mss = skb_shinfo(skb)->tso_size) != 0) {
3866                 int tcp_opt_len, ip_tcp_len;
3867
3868                 if (skb_header_cloned(skb) &&
3869                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3870                         dev_kfree_skb(skb);
3871                         goto out_unlock;
3872                 }
3873
3874                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3875                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3876
3877                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3878                                TXD_FLAG_CPU_POST_DMA);
3879
3880                 skb->nh.iph->check = 0;
3881                 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
3882                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3883                         skb->h.th->check = 0;
3884                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
3885                 }
3886                 else {
3887                         skb->h.th->check =
3888                                 ~csum_tcpudp_magic(skb->nh.iph->saddr,
3889                                                    skb->nh.iph->daddr,
3890                                                    0, IPPROTO_TCP, 0);
3891                 }
3892
3893                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
3894                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
3895                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3896                                 int tsflags;
3897
3898                                 tsflags = ((skb->nh.iph->ihl - 5) +
3899                                            (tcp_opt_len >> 2));
3900                                 mss |= (tsflags << 11);
3901                         }
3902                 } else {
3903                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3904                                 int tsflags;
3905
3906                                 tsflags = ((skb->nh.iph->ihl - 5) +
3907                                            (tcp_opt_len >> 2));
3908                                 base_flags |= tsflags << 12;
3909                         }
3910                 }
3911         }
3912 #else
3913         mss = 0;
3914 #endif
3915 #if TG3_VLAN_TAG_USED
3916         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3917                 base_flags |= (TXD_FLAG_VLAN |
3918                                (vlan_tx_tag_get(skb) << 16));
3919 #endif
3920
3921         /* Queue skb data, a.k.a. the main skb fragment. */
3922         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3923
3924         tp->tx_buffers[entry].skb = skb;
3925         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3926
3927         would_hit_hwbug = 0;
3928
3929         if (tg3_4g_overflow_test(mapping, len))
3930                 would_hit_hwbug = 1;
3931
3932         tg3_set_txd(tp, entry, mapping, len, base_flags,
3933                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3934
3935         entry = NEXT_TX(entry);
3936
3937         /* Now loop through additional data fragments, and queue them. */
3938         if (skb_shinfo(skb)->nr_frags > 0) {
3939                 unsigned int i, last;
3940
3941                 last = skb_shinfo(skb)->nr_frags - 1;
3942                 for (i = 0; i <= last; i++) {
3943                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3944
3945                         len = frag->size;
3946                         mapping = pci_map_page(tp->pdev,
3947                                                frag->page,
3948                                                frag->page_offset,
3949                                                len, PCI_DMA_TODEVICE);
3950
3951                         tp->tx_buffers[entry].skb = NULL;
3952                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3953
3954                         if (tg3_4g_overflow_test(mapping, len))
3955                                 would_hit_hwbug = 1;
3956
3957                         if (tg3_40bit_overflow_test(tp, mapping, len))
3958                                 would_hit_hwbug = 1;
3959
3960                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
3961                                 tg3_set_txd(tp, entry, mapping, len,
3962                                             base_flags, (i == last)|(mss << 1));
3963                         else
3964                                 tg3_set_txd(tp, entry, mapping, len,
3965                                             base_flags, (i == last));
3966
3967                         entry = NEXT_TX(entry);
3968                 }
3969         }
3970
3971         if (would_hit_hwbug) {
3972                 u32 last_plus_one = entry;
3973                 u32 start;
3974
3975                 start = entry - 1 - skb_shinfo(skb)->nr_frags;
3976                 start &= (TG3_TX_RING_SIZE - 1);
3977
3978                 /* If the workaround fails due to memory/mapping
3979                  * failure, silently drop this packet.
3980                  */
3981                 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
3982                                                 &start, base_flags, mss))
3983                         goto out_unlock;
3984
3985                 entry = start;
3986         }
3987
3988         /* Packets are ready, update Tx producer idx local and on card. */
3989         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3990
3991         tp->tx_prod = entry;
3992         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) {
3993                 netif_stop_queue(dev);
3994                 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)
3995                         netif_wake_queue(tp->dev);
3996         }
3997
3998 out_unlock:
3999         mmiowb();
4000         spin_unlock(&tp->tx_lock);
4001
4002         dev->trans_start = jiffies;
4003
4004         return NETDEV_TX_OK;
4005 }
4006
4007 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4008                                int new_mtu)
4009 {
4010         dev->mtu = new_mtu;
4011
4012         if (new_mtu > ETH_DATA_LEN) {
4013                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4014                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4015                         ethtool_op_set_tso(dev, 0);
4016                 }
4017                 else
4018                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4019         } else {
4020                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4021                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4022                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
4023         }
4024 }
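/* MTU note: on 5780-class chips jumbo MTUs are handled by enlarging the
 * standard ring buffers (see tg3_init_rings()) and TSO is turned off,
 * while other jumbo-capable chips enable the dedicated jumbo ring via
 * TG3_FLAG_JUMBO_RING_ENABLE.
 */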
4025
4026 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4027 {
4028         struct tg3 *tp = netdev_priv(dev);
4029
4030         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
4031                 return -EINVAL;
4032
4033         if (!netif_running(dev)) {
4034                 /* We'll just catch it later when the
4035                  * device is up'd.
4036                  */
4037                 tg3_set_mtu(dev, tp, new_mtu);
4038                 return 0;
4039         }
4040
4041         tg3_netif_stop(tp);
4042
4043         tg3_full_lock(tp, 1);
4044
4045         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4046
4047         tg3_set_mtu(dev, tp, new_mtu);
4048
4049         tg3_init_hw(tp);
4050
4051         tg3_netif_start(tp);
4052
4053         tg3_full_unlock(tp);
4054
4055         return 0;
4056 }
4057
4058 /* Free up pending packets in all rx/tx rings.
4059  *
4060  * The chip has been shut down and the driver detached from
4061  * the networking stack, so no interrupts or new tx packets will
4062  * end up in the driver.  tp->{tx,}lock is not held and we are not
4063  * in an interrupt context and thus may sleep.
4064  */
4065 static void tg3_free_rings(struct tg3 *tp)
4066 {
4067         struct ring_info *rxp;
4068         int i;
4069
4070         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4071                 rxp = &tp->rx_std_buffers[i];
4072
4073                 if (rxp->skb == NULL)
4074                         continue;
4075                 pci_unmap_single(tp->pdev,
4076                                  pci_unmap_addr(rxp, mapping),
4077                                  tp->rx_pkt_buf_sz - tp->rx_offset,
4078                                  PCI_DMA_FROMDEVICE);
4079                 dev_kfree_skb_any(rxp->skb);
4080                 rxp->skb = NULL;
4081         }
4082
4083         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4084                 rxp = &tp->rx_jumbo_buffers[i];
4085
4086                 if (rxp->skb == NULL)
4087                         continue;
4088                 pci_unmap_single(tp->pdev,
4089                                  pci_unmap_addr(rxp, mapping),
4090                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
4091                                  PCI_DMA_FROMDEVICE);
4092                 dev_kfree_skb_any(rxp->skb);
4093                 rxp->skb = NULL;
4094         }
4095
4096         for (i = 0; i < TG3_TX_RING_SIZE; ) {
4097                 struct tx_ring_info *txp;
4098                 struct sk_buff *skb;
4099                 int j;
4100
4101                 txp = &tp->tx_buffers[i];
4102                 skb = txp->skb;
4103
4104                 if (skb == NULL) {
4105                         i++;
4106                         continue;
4107                 }
4108
4109                 pci_unmap_single(tp->pdev,
4110                                  pci_unmap_addr(txp, mapping),
4111                                  skb_headlen(skb),
4112                                  PCI_DMA_TODEVICE);
4113                 txp->skb = NULL;
4114
4115                 i++;
4116
4117                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
4118                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
4119                         pci_unmap_page(tp->pdev,
4120                                        pci_unmap_addr(txp, mapping),
4121                                        skb_shinfo(skb)->frags[j].size,
4122                                        PCI_DMA_TODEVICE);
4123                         i++;
4124                 }
4125
4126                 dev_kfree_skb_any(skb);
4127         }
4128 }
4129
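/* The fragment unmap loop in tg3_free_rings() above advances the tx ring
 * index with a power-of-two mask rather than a modulo.  A minimal
 * standalone sketch of that wrap-around, assuming (as the masking
 * requires) that the ring size is a power of two:
 */
#if 0   /* Illustrative sketch only, not built into the driver. */
static unsigned int ex_ring_next(unsigned int idx, unsigned int ring_size)
{
        /* For a power-of-two ring_size this equals (idx + 1) % ring_size,
         * but avoids a divide on the hot path.
         */
        return (idx + 1) & (ring_size - 1);
}
#endif
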
4130 /* Initialize tx/rx rings for packet processing.
4131  *
4132  * The chip has been shut down and the driver detached from
4133  * the networking stack, so no interrupts or new tx packets will
4134  * end up in the driver.  tp->{tx,}lock are held and thus
4135  * we may not sleep.
4136  */
4137 static void tg3_init_rings(struct tg3 *tp)
4138 {
4139         u32 i;
4140
4141         /* Free up all the SKBs. */
4142         tg3_free_rings(tp);
4143
4144         /* Zero out all descriptors. */
4145         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
4146         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
4147         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
4148         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
4149
4150         tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
4151         if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
4152             (tp->dev->mtu > ETH_DATA_LEN))
4153                 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
4154
4155         /* Initialize invariants of the rings; we only set this
4156          * stuff once.  This works because the card does not
4157          * write into the rx buffer posting rings.
4158          */
4159         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4160                 struct tg3_rx_buffer_desc *rxd;
4161
4162                 rxd = &tp->rx_std[i];
4163                 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
4164                         << RXD_LEN_SHIFT;
4165                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
4166                 rxd->opaque = (RXD_OPAQUE_RING_STD |
4167                                (i << RXD_OPAQUE_INDEX_SHIFT));
4168         }
4169
4170         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4171                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4172                         struct tg3_rx_buffer_desc *rxd;
4173
4174                         rxd = &tp->rx_jumbo[i];
4175                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
4176                                 << RXD_LEN_SHIFT;
4177                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
4178                                 RXD_FLAG_JUMBO;
4179                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
4180                                (i << RXD_OPAQUE_INDEX_SHIFT));
4181                 }
4182         }
4183
4184         /* Now allocate fresh SKBs for each rx ring. */
4185         for (i = 0; i < tp->rx_pending; i++) {
4186                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
4187                                      -1, i) < 0)
4188                         break;
4189         }
4190
4191         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4192                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
4193                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
4194                                              -1, i) < 0)
4195                                 break;
4196                 }
4197         }
4198 }
4199
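/* The descriptor setup in tg3_init_rings() above packs the usable buffer
 * length and a ring/index cookie into single 32-bit words using shift
 * constants from tg3.h.  A minimal sketch of that packing, with
 * hypothetical EX_* values standing in for the RXD_* constants:
 */
#if 0   /* Illustrative sketch only, not built into the driver. */
#define EX_LEN_SHIFT            16              /* hypothetical stand-in for RXD_LEN_SHIFT */
#define EX_INDEX_SHIFT          0               /* hypothetical stand-in for RXD_OPAQUE_INDEX_SHIFT */
#define EX_RING_STD             0x40000000      /* hypothetical stand-in for RXD_OPAQUE_RING_STD */

static u32 ex_pack_idx_len(u32 buf_sz, u32 rx_offset)
{
        /* Usable length is the buffer minus the rx alignment offset and
         * the fixed 64 bytes subtracted in the loops above.
         */
        return (buf_sz - rx_offset - 64) << EX_LEN_SHIFT;
}

static u32 ex_pack_opaque(u32 index)
{
        /* Ring tag plus entry index: a cookie the driver can use to
         * identify the buffer when the descriptor comes back.
         */
        return EX_RING_STD | (index << EX_INDEX_SHIFT);
}
#endif
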
4200 /*
4201  * Must not be invoked with interrupt sources disabled and
4202  * the hardware shut down.
4203  */
4204 static void tg3_free_consistent(struct tg3 *tp)
4205 {
4206         kfree(tp->rx_std_buffers);
4207         tp->rx_std_buffers = NULL;
4208         if (tp->rx_std) {
4209                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4210                                     tp->rx_std, tp->rx_std_mapping);
4211                 tp->rx_std = NULL;
4212         }
4213         if (tp->rx_jumbo) {
4214                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4215                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
4216                 tp->rx_jumbo = NULL;
4217         }
4218         if (tp->rx_rcb) {
4219                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4220                                     tp->rx_rcb, tp->rx_rcb_mapping);
4221                 tp->rx_rcb = NULL;
4222         }
4223         if (tp->tx_ring) {
4224                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4225                         tp->tx_ring, tp->tx_desc_mapping);
4226                 tp->tx_ring = NULL;
4227         }
4228         if (tp->hw_status) {
4229                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4230                                     tp->hw_status, tp->status_mapping);
4231                 tp->hw_status = NULL;
4232         }
4233         if (tp->hw_stats) {
4234                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4235                                     tp->hw_stats, tp->stats_mapping);
4236                 tp->hw_stats = NULL;
4237         }
4238 }
4239
4240 /*
4241  * Must not be invoked with interrupt sources disabled and
4242  * the hardware shut down.  Can sleep.
4243  */
4244 static int tg3_alloc_consistent(struct tg3 *tp)
4245 {
4246         tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
4247                                       (TG3_RX_RING_SIZE +
4248                                        TG3_RX_JUMBO_RING_SIZE)) +
4249                                      (sizeof(struct tx_ring_info) *
4250                                       TG3_TX_RING_SIZE),
4251                                      GFP_KERNEL);
4252         if (!tp->rx_std_buffers)
4253                 return -ENOMEM;
4254
4255         memset(tp->rx_std_buffers, 0,
4256                (sizeof(struct ring_info) *
4257                 (TG3_RX_RING_SIZE +
4258                  TG3_RX_JUMBO_RING_SIZE)) +
4259                (sizeof(struct tx_ring_info) *
4260                 TG3_TX_RING_SIZE));
4261
4262         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4263         tp->tx_buffers = (struct tx_ring_info *)
4264                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4265
4266         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4267                                           &tp->rx_std_mapping);
4268         if (!tp->rx_std)
4269                 goto err_out;
4270
4271         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4272                                             &tp->rx_jumbo_mapping);
4273
4274         if (!tp->rx_jumbo)
4275                 goto err_out;
4276
4277         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4278                                           &tp->rx_rcb_mapping);
4279         if (!tp->rx_rcb)
4280                 goto err_out;
4281
4282         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4283                                            &tp->tx_desc_mapping);
4284         if (!tp->tx_ring)
4285                 goto err_out;
4286
4287         tp->hw_status = pci_alloc_consistent(tp->pdev,
4288                                              TG3_HW_STATUS_SIZE,
4289                                              &tp->status_mapping);
4290         if (!tp->hw_status)
4291                 goto err_out;
4292
4293         tp->hw_stats = pci_alloc_consistent(tp->pdev,
4294                                             sizeof(struct tg3_hw_stats),
4295                                             &tp->stats_mapping);
4296         if (!tp->hw_stats)
4297                 goto err_out;
4298
4299         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4300         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4301
4302         return 0;
4303
4304 err_out:
4305         tg3_free_consistent(tp);
4306         return -ENOMEM;
4307 }
4308
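/* tg3_alloc_consistent() above carves one kmalloc() block into the std,
 * jumbo and tx bookkeeping arrays, so the single kfree() in
 * tg3_free_consistent() releases all three.  A minimal standalone sketch
 * of that carving, with made-up element types:
 */
#if 0   /* Illustrative sketch only, not built into the driver. */
struct ex_rx_info { void *skb; };
struct ex_tx_info { void *skb; };

static int ex_carve_buffers(struct ex_rx_info **std, struct ex_rx_info **jumbo,
                            struct ex_tx_info **tx, int nstd, int njumbo, int ntx)
{
        void *blk = kmalloc(sizeof(struct ex_rx_info) * (nstd + njumbo) +
                            sizeof(struct ex_tx_info) * ntx, GFP_KERNEL);

        if (!blk)
                return -ENOMEM;
        /* Sub-arrays laid out back to back inside the one allocation. */
        *std = blk;
        *jumbo = *std + nstd;
        *tx = (struct ex_tx_info *) (*jumbo + njumbo);
        return 0;
}
#endif
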
4309 #define MAX_WAIT_CNT 1000
4310
4311 /* To stop a block, clear the enable bit and poll till it
4312  * clears.  tp->lock is held.
4313  */
4314 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4315 {
4316         unsigned int i;
4317         u32 val;
4318
4319         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4320                 switch (ofs) {
4321                 case RCVLSC_MODE:
4322                 case DMAC_MODE:
4323                 case MBFREE_MODE:
4324                 case BUFMGR_MODE:
4325                 case MEMARB_MODE:
4326                         /* We can't enable/disable these bits on the
4327                          * 5705/5750, so just report success.
4328                          */
4329                         return 0;
4330
4331                 default:
4332                         break;
4333                 }
4334         }
4335
4336         val = tr32(ofs);
4337         val &= ~enable_bit;
4338         tw32_f(ofs, val);
4339
4340         for (i = 0; i < MAX_WAIT_CNT; i++) {
4341                 udelay(100);
4342                 val = tr32(ofs);
4343                 if ((val & enable_bit) == 0)
4344                         break;
4345         }
4346
4347         if (i == MAX_WAIT_CNT && !silent) {
4348                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4349                        "ofs=%lx enable_bit=%x\n",
4350                        ofs, enable_bit);
4351                 return -ENODEV;
4352         }
4353
4354         return 0;
4355 }
4356
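/* Worst-case wait in tg3_stop_block() above: MAX_WAIT_CNT iterations of
 * udelay(100), i.e. 1000 * 100us = 100ms of polling before the block is
 * declared stuck and -ENODEV is returned.
 */
#if 0   /* Illustrative sketch only: the poll budget used above. */
#define EX_STOP_BLOCK_POLL_US   (MAX_WAIT_CNT * 100)    /* 1000 * 100us = 100ms */
#endif
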
4357 /* tp->lock is held. */
4358 static int tg3_abort_hw(struct tg3 *tp, int silent)
4359 {
4360         int i, err;
4361
4362         tg3_disable_ints(tp);
4363
4364         tp->rx_mode &= ~RX_MODE_ENABLE;
4365         tw32_f(MAC_RX_MODE, tp->rx_mode);
4366         udelay(10);
4367
4368         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4369         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4370         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
4371         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
4372         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
4373         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
4374
4375         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
4376         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
4377         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
4378         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
4379         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
4380         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
4381         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
4382
4383         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
4384         tw32_f(MAC_MODE, tp->mac_mode);
4385         udelay(40);
4386
4387         tp->tx_mode &= ~TX_MODE_ENABLE;
4388         tw32_f(MAC_TX_MODE, tp->tx_mode);
4389
4390         for (i = 0; i < MAX_WAIT_CNT; i++) {
4391                 udelay(100);
4392                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
4393                         break;
4394         }
4395         if (i >= MAX_WAIT_CNT) {
4396                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
4397                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4398                        tp->dev->name, tr32(MAC_TX_MODE));
4399                 err |= -ENODEV;
4400         }
4401
4402         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
4403         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
4404         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
4405
4406         tw32(FTQ_RESET, 0xffffffff);
4407         tw32(FTQ_RESET, 0x00000000);
4408
4409         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
4410         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
4411
4412         if (tp->hw_status)
4413                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4414         if (tp->hw_stats)
4415                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4416
4417         return err;
4418 }
4419
4420 /* tp->lock is held. */
4421 static int tg3_nvram_lock(struct tg3 *tp)
4422 {
4423         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4424                 int i;
4425
4426                 if (tp->nvram_lock_cnt == 0) {
4427                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4428                         for (i = 0; i < 8000; i++) {
4429                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4430                                         break;
4431                                 udelay(20);
4432                         }
4433                         if (i == 8000) {
4434                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4435                                 return -ENODEV;
4436                         }
4437                 }
4438                 tp->nvram_lock_cnt++;
4439         }
4440         return 0;
4441 }
4442
4443 /* tp->lock is held. */
4444 static void tg3_nvram_unlock(struct tg3 *tp)
4445 {
4446         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4447                 if (tp->nvram_lock_cnt > 0)
4448                         tp->nvram_lock_cnt--;
4449                 if (tp->nvram_lock_cnt == 0)
4450                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4451         }
4452 }
4453
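/* tg3_nvram_lock()/tg3_nvram_unlock() above nest by way of nvram_lock_cnt:
 * only the outermost lock touches the NVRAM_SWARB arbitration register.
 * A minimal usage sketch with a hypothetical caller (tp->lock held, as the
 * comments above require):
 */
#if 0   /* Illustrative sketch only, not built into the driver. */
static void ex_nested_nvram_access(struct tg3 *tp)
{
        if (tg3_nvram_lock(tp))         /* first acquire: requests SWARB_REQ_SET1 */
                return;
        tg3_nvram_lock(tp);             /* nested acquire: only bumps nvram_lock_cnt */
        /* ... NVRAM accesses go here ... */
        tg3_nvram_unlock(tp);           /* drops the count, arbitration still held */
        tg3_nvram_unlock(tp);           /* count reaches 0: SWARB_REQ_CLR1 written */
}
#endif
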
4454 /* tp->lock is held. */
4455 static void tg3_enable_nvram_access(struct tg3 *tp)
4456 {
4457         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4458             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4459                 u32 nvaccess = tr32(NVRAM_ACCESS);
4460
4461                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4462         }
4463 }
4464
4465 /* tp->lock is held. */
4466 static void tg3_disable_nvram_access(struct tg3 *tp)
4467 {
4468         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4469             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4470                 u32 nvaccess = tr32(NVRAM_ACCESS);
4471
4472                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4473         }
4474 }
4475
4476 /* tp->lock is held. */
4477 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4478 {
4479         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
4480                 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4481                               NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4482
4483         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4484                 switch (kind) {
4485                 case RESET_KIND_INIT:
4486                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4487                                       DRV_STATE_START);
4488                         break;
4489
4490                 case RESET_KIND_SHUTDOWN:
4491                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4492                                       DRV_STATE_UNLOAD);
4493                         break;
4494
4495                 case RESET_KIND_SUSPEND:
4496                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4497                                       DRV_STATE_SUSPEND);
4498                         break;
4499
4500                 default:
4501                         break;
4502                 }
4503         }
4504 }
4505
4506 /* tp->lock is held. */
4507 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4508 {
4509         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4510                 switch (kind) {
4511                 case RESET_KIND_INIT:
4512                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4513                                       DRV_STATE_START_DONE);
4514                         break;
4515
4516                 case RESET_KIND_SHUTDOWN:
4517                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4518                                       DRV_STATE_UNLOAD_DONE);
4519                         break;
4520
4521                 default:
4522                         break;
4523                 }
4524         }
4525 }
4526
4527 /* tp->lock is held. */
4528 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4529 {
4530         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4531                 switch (kind) {
4532                 case RESET_KIND_INIT:
4533                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4534                                       DRV_STATE_START);
4535                         break;
4536
4537                 case RESET_KIND_SHUTDOWN:
4538                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4539                                       DRV_STATE_UNLOAD);
4540                         break;
4541
4542                 case RESET_KIND_SUSPEND:
4543                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4544                                       DRV_STATE_SUSPEND);
4545                         break;
4546
4547                 default:
4548                         break;
4549                 }
4550         }
4551 }
4552
4553 static void tg3_stop_fw(struct tg3 *);
4554
4555 /* tp->lock is held. */
4556 static int tg3_chip_reset(struct tg3 *tp)
4557 {
4558         u32 val;
4559         void (*write_op)(struct tg3 *, u32, u32);
4560         int i;
4561
4562         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
4563                 tg3_nvram_lock(tp);
4564                 /* No matching tg3_nvram_unlock() after this because
4565                  * chip reset below will undo the nvram lock.
4566                  */
4567                 tp->nvram_lock_cnt = 0;
4568         }
4569
4570         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
4571             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
4572             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
4573                 tw32(GRC_FASTBOOT_PC, 0);
4574
4575         /*
4576          * We must avoid the readl() that normally takes place.
4577          * It can lock up machines, cause machine checks, and other
4578          * fun things.  So temporarily disable the 5701
4579          * hardware workaround while we do the reset.
4580          */
4581         write_op = tp->write32;
4582         if (write_op == tg3_write_flush_reg32)
4583                 tp->write32 = tg3_write32;
4584
4585         /* do the reset */
4586         val = GRC_MISC_CFG_CORECLK_RESET;
4587
4588         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4589                 if (tr32(0x7e2c) == 0x60) {
4590                         tw32(0x7e2c, 0x20);
4591                 }
4592                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4593                         tw32(GRC_MISC_CFG, (1 << 29));
4594                         val |= (1 << 29);
4595                 }
4596         }
4597
4598         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4599                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
4600         tw32(GRC_MISC_CFG, val);
4601
4602         /* restore 5701 hardware bug workaround write method */
4603         tp->write32 = write_op;
4604
4605         /* Unfortunately, we have to delay before the PCI read back.
4606          * Some 575X chips will not even respond to a PCI cfg access
4607          * when the reset command is given to the chip.
4608          *
4609          * How do these hardware designers expect things to work
4610          * properly if the PCI write is posted for a long period
4611          * of time?  It is always necessary to have some method by
4612          * which a register read back can occur to push out the
4613          * write that performs the reset.
4614          *
4615          * For most tg3 variants the trick below works.
4616          * Ho hum...
4617          */
4618         udelay(120);
4619
4620         /* Flush PCI posted writes.  The normal MMIO registers
4621          * are inaccessible at this time so this is the only
4622          * way to do this reliably (actually, this is no longer
4623          * the case, see above).  I tried to use indirect
4624          * register read/write but this upset some 5701 variants.
4625          */
4626         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
4627
4628         udelay(120);
4629
4630         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4631                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
4632                         int i;
4633                         u32 cfg_val;
4634
4635                         /* Wait for link training to complete.  */
4636                         for (i = 0; i < 5000; i++)
4637                                 udelay(100);
4638
4639                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
4640                         pci_write_config_dword(tp->pdev, 0xc4,
4641                                                cfg_val | (1 << 15));
4642                 }
4643                 /* Set PCIE max payload size and clear error status.  */
4644                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
4645         }
4646
4647         /* Re-enable indirect register accesses. */
4648         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
4649                                tp->misc_host_ctrl);
4650
4651         /* Set MAX PCI retry to zero. */
4652         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
4653         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4654             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
4655                 val |= PCISTATE_RETRY_SAME_DMA;
4656         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
4657
4658         pci_restore_state(tp->pdev);
4659
4660         /* Make sure PCI-X relaxed ordering bit is clear. */
4661         pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
4662         val &= ~PCIX_CAPS_RELAXED_ORDERING;
4663         pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
4664
4665         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4666                 u32 val;
4667
4668                 /* Chip reset on 5780 will reset the MSI enable bit,
4669                  * so we need to restore it.
4670                  */
4671                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
4672                         u16 ctrl;
4673
4674                         pci_read_config_word(tp->pdev,
4675                                              tp->msi_cap + PCI_MSI_FLAGS,
4676                                              &ctrl);
4677                         pci_write_config_word(tp->pdev,
4678                                               tp->msi_cap + PCI_MSI_FLAGS,
4679                                               ctrl | PCI_MSI_FLAGS_ENABLE);
4680                         val = tr32(MSGINT_MODE);
4681                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
4682                 }
4683
4684                 val = tr32(MEMARB_MODE);
4685                 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
4686
4687         } else
4688                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
4689
4690         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
4691                 tg3_stop_fw(tp);
4692                 tw32(0x5000, 0x400);
4693         }
4694
4695         tw32(GRC_MODE, tp->grc_mode);
4696
4697         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
4698                 u32 val = tr32(0xc4);
4699
4700                 tw32(0xc4, val | (1 << 15));
4701         }
4702
4703         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
4704             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4705                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
4706                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
4707                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
4708                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4709         }
4710
4711         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4712                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
4713                 tw32_f(MAC_MODE, tp->mac_mode);
4714         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4715                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
4716                 tw32_f(MAC_MODE, tp->mac_mode);
4717         } else
4718                 tw32_f(MAC_MODE, 0);
4719         udelay(40);
4720
4721         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
4722                 /* Wait for firmware initialization to complete. */
4723                 for (i = 0; i < 100000; i++) {
4724                         tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4725                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4726                                 break;
4727                         udelay(10);
4728                 }
4729                 if (i >= 100000) {
4730                         printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
4731                                "firmware will not restart magic=%08x\n",
4732                                tp->dev->name, val);
4733                         return -ENODEV;
4734                 }
4735         }
4736
4737         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
4738             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4739                 u32 val = tr32(0x7c00);
4740
4741                 tw32(0x7c00, val | (1 << 25));
4742         }
4743
4744         /* Reprobe ASF enable state.  */
4745         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
4746         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
4747         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
4748         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
4749                 u32 nic_cfg;
4750
4751                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
4752                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
4753                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
4754                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
4755                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
4756                 }
4757         }
4758
4759         return 0;
4760 }
4761
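/* tg3_chip_reset() above temporarily points tp->write32 away from the
 * flushing variant so that no readl() happens while the core clock is
 * being reset, then restores it.  A minimal sketch of that save/swap/
 * restore pattern around a single write (hypothetical helper name):
 */
#if 0   /* Illustrative sketch only, not built into the driver. */
static void ex_write_without_flush(struct tg3 *tp, u32 reg, u32 val)
{
        void (*saved)(struct tg3 *, u32, u32) = tp->write32;

        if (saved == tg3_write_flush_reg32)
                tp->write32 = tg3_write32;      /* posted write, no read back */
        tw32(reg, val);                         /* e.g. the GRC_MISC_CFG reset */
        tp->write32 = saved;                    /* restore the 5701 workaround */
}
#endif
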
4762 /* tp->lock is held. */
4763 static void tg3_stop_fw(struct tg3 *tp)
4764 {
4765         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4766                 u32 val;
4767                 int i;
4768
4769                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4770                 val = tr32(GRC_RX_CPU_EVENT);
4771                 val |= (1 << 14);
4772                 tw32(GRC_RX_CPU_EVENT, val);
4773
4774                 /* Wait for RX cpu to ACK the event.  */
4775                 for (i = 0; i < 100; i++) {
4776                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4777                                 break;
4778                         udelay(1);
4779                 }
4780         }
4781 }
4782
4783 /* tp->lock is held. */
4784 static int tg3_halt(struct tg3 *tp, int kind, int silent)
4785 {
4786         int err;
4787
4788         tg3_stop_fw(tp);
4789
4790         tg3_write_sig_pre_reset(tp, kind);
4791
4792         tg3_abort_hw(tp, silent);
4793         err = tg3_chip_reset(tp);
4794
4795         tg3_write_sig_legacy(tp, kind);
4796         tg3_write_sig_post_reset(tp, kind);
4797
4798         if (err)
4799                 return err;
4800
4801         return 0;
4802 }
4803
4804 #define TG3_FW_RELEASE_MAJOR    0x0
4805 #define TG3_FW_RELASE_MINOR     0x0
4806 #define TG3_FW_RELEASE_FIX      0x0
4807 #define TG3_FW_START_ADDR       0x08000000
4808 #define TG3_FW_TEXT_ADDR        0x08000000
4809 #define TG3_FW_TEXT_LEN         0x9c0
4810 #define TG3_FW_RODATA_ADDR      0x080009c0
4811 #define TG3_FW_RODATA_LEN       0x60
4812 #define TG3_FW_DATA_ADDR        0x08000a40
4813 #define TG3_FW_DATA_LEN         0x20
4814 #define TG3_FW_SBSS_ADDR        0x08000a60
4815 #define TG3_FW_SBSS_LEN         0xc
4816 #define TG3_FW_BSS_ADDR         0x08000a70
4817 #define TG3_FW_BSS_LEN          0x10
4818
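/* Layout of the 5701_A0 fix-up image described by the defines above: the
 * sections sit back to back in the firmware's address space, e.g.
 * TG3_FW_RODATA_ADDR = TG3_FW_TEXT_ADDR + TG3_FW_TEXT_LEN
 * (0x08000000 + 0x9c0 = 0x080009c0) and TG3_FW_SBSS_ADDR =
 * TG3_FW_DATA_ADDR + TG3_FW_DATA_LEN (0x08000a40 + 0x20 = 0x08000a60).
 * The small gaps before the data and bss sections look like alignment
 * padding, though that is only inferred from the numbers.
 */
#if 0   /* Illustrative sketch only: compile-time checks of the arithmetic above. */
typedef char ex_rodata_follows_text[(TG3_FW_RODATA_ADDR ==
                                     TG3_FW_TEXT_ADDR + TG3_FW_TEXT_LEN) ? 1 : -1];
typedef char ex_sbss_follows_data[(TG3_FW_SBSS_ADDR ==
                                   TG3_FW_DATA_ADDR + TG3_FW_DATA_LEN) ? 1 : -1];
#endif
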
4819 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
4820         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4821         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4822         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4823         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4824         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4825         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4826         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4827         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4828         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4829         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4830         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4831         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4832         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4833         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4834         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4835         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4836         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4837         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4838         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4839         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4840         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4841         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4842         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4843         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4844         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4845         0, 0, 0, 0, 0, 0,
4846         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4847         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4848         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4849         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4850         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4851         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4852         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4853         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4854         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4855         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4856         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4857         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4858         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4859         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4860         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4861         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4862         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4863         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4864         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4865         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4866         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4867         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4868         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4869         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4870         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4871         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
4872         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
4873         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
4874         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
4875         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
4876         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
4877         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
4878         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
4879         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
4880         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
4881         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
4882         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
4883         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4884         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4885         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
4886         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
4887         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
4888         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
4889         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
4890         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
4891         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
4892         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
4893         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4894         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4895         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4896         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4897         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4898         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4899         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4900         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4901         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4902         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4903         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4904         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
4905         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
4906         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
4907         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
4908         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
4909         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
4910         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
4911 };
4912
4913 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
4914         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
4915         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
4916         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4917         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
4918         0x00000000
4919 };
4920
4921 #if 0 /* All zeros, don't eat up space with it. */
4922 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
4923         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4924         0x00000000, 0x00000000, 0x00000000, 0x00000000
4925 };
4926 #endif
4927
4928 #define RX_CPU_SCRATCH_BASE     0x30000
4929 #define RX_CPU_SCRATCH_SIZE     0x04000
4930 #define TX_CPU_SCRATCH_BASE     0x34000
4931 #define TX_CPU_SCRATCH_SIZE     0x04000
4932
4933 /* tp->lock is held. */
4934 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4935 {
4936         int i;
4937
4938         if (offset == TX_CPU_BASE &&
4939             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
4940                 BUG();
4941
4942         if (offset == RX_CPU_BASE) {
4943                 for (i = 0; i < 10000; i++) {
4944                         tw32(offset + CPU_STATE, 0xffffffff);
4945                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4946                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4947                                 break;
4948                 }
4949
4950                 tw32(offset + CPU_STATE, 0xffffffff);
4951                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
4952                 udelay(10);
4953         } else {
4954                 for (i = 0; i < 10000; i++) {
4955                         tw32(offset + CPU_STATE, 0xffffffff);
4956                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4957                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4958                                 break;
4959                 }
4960         }
4961
4962         if (i >= 10000) {
4963                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
4964                        "and %s CPU\n",
4965                        tp->dev->name,
4966                        (offset == RX_CPU_BASE ? "RX" : "TX"));
4967                 return -ENODEV;
4968         }
4969
4970         /* Clear firmware's nvram arbitration. */
4971         if (tp->tg3_flags & TG3_FLAG_NVRAM)
4972                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
4973         return 0;
4974 }
4975
4976 struct fw_info {
4977         unsigned int text_base;
4978         unsigned int text_len;
4979         u32 *text_data;
4980         unsigned int rodata_base;
4981         unsigned int rodata_len;
4982         u32 *rodata_data;
4983         unsigned int data_base;
4984         unsigned int data_len;
4985         u32 *data_data;
4986 };
4987
4988 /* tp->lock is held. */
4989 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
4990                                  int cpu_scratch_size, struct fw_info *info)
4991 {
4992         int err, lock_err, i;
4993         void (*write_op)(struct tg3 *, u32, u32);
4994
4995         if (cpu_base == TX_CPU_BASE &&
4996             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4997                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
4998                        "TX cpu firmware on %s which is 5705.\n",
4999                        tp->dev->name);
5000                 return -EINVAL;
5001         }
5002
5003         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5004                 write_op = tg3_write_mem;
5005         else
5006                 write_op = tg3_write_indirect_reg32;
5007
5008         /* It is possible that bootcode is still loading at this point.
5009          * Get the nvram lock before halting the cpu.
5010          */
5011         lock_err = tg3_nvram_lock(tp);
5012         err = tg3_halt_cpu(tp, cpu_base);
5013         if (!lock_err)
5014                 tg3_nvram_unlock(tp);
5015         if (err)
5016                 goto out;
5017
5018         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5019                 write_op(tp, cpu_scratch_base + i, 0);
5020         tw32(cpu_base + CPU_STATE, 0xffffffff);
5021         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5022         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5023                 write_op(tp, (cpu_scratch_base +
5024                               (info->text_base & 0xffff) +
5025                               (i * sizeof(u32))),
5026                          (info->text_data ?
5027                           info->text_data[i] : 0));
5028         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5029                 write_op(tp, (cpu_scratch_base +
5030                               (info->rodata_base & 0xffff) +
5031                               (i * sizeof(u32))),
5032                          (info->rodata_data ?
5033                           info->rodata_data[i] : 0));
5034         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5035                 write_op(tp, (cpu_scratch_base +
5036                               (info->data_base & 0xffff) +
5037                               (i * sizeof(u32))),
5038                          (info->data_data ?
5039                           info->data_data[i] : 0));
5040
5041         err = 0;
5042
5043 out:
5044         return err;
5045 }
5046
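/* tg3_load_firmware_cpu() above turns the firmware's link addresses into
 * offsets inside the CPU scratch window by keeping only the low 16 bits:
 * e.g. TG3_FW_TEXT_ADDR (0x08000000) & 0xffff = 0x0000 lands at
 * cpu_scratch_base itself, and TG3_FW_RODATA_ADDR (0x080009c0) & 0xffff =
 * 0x09c0 lands 0x9c0 bytes into the window.  A minimal sketch of the same
 * translation (hypothetical helper name):
 */
#if 0   /* Illustrative sketch only, not built into the driver. */
static u32 ex_scratch_offset(u32 cpu_scratch_base, u32 link_addr, u32 word_index)
{
        /* Low 16 bits of the link address pick the byte offset inside the
         * scratch window; word_index walks a section one u32 at a time.
         */
        return cpu_scratch_base + (link_addr & 0xffff) +
               word_index * sizeof(u32);
}
#endif
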
5047 /* tp->lock is held. */
5048 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5049 {
5050         struct fw_info info;
5051         int err, i;
5052
5053         info.text_base = TG3_FW_TEXT_ADDR;
5054         info.text_len = TG3_FW_TEXT_LEN;
5055         info.text_data = &tg3FwText[0];
5056         info.rodata_base = TG3_FW_RODATA_ADDR;
5057         info.rodata_len = TG3_FW_RODATA_LEN;
5058         info.rodata_data = &tg3FwRodata[0];
5059         info.data_base = TG3_FW_DATA_ADDR;
5060         info.data_len = TG3_FW_DATA_LEN;
5061         info.data_data = NULL;
5062
5063         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5064                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5065                                     &info);
5066         if (err)
5067                 return err;
5068
5069         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5070                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5071                                     &info);
5072         if (err)
5073                 return err;
5074
5075         /* Now start up only the RX cpu. */
5076         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5077         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5078
5079         for (i = 0; i < 5; i++) {
5080                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5081                         break;
5082                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5083                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
5084                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5085                 udelay(1000);
5086         }
5087         if (i >= 5) {
5088                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
5089                        "to set RX CPU PC, is %08x should be %08x\n",
5090                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
5091                        TG3_FW_TEXT_ADDR);
5092                 return -ENODEV;
5093         }
5094         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5095         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
5096
5097         return 0;
5098 }
5099
5100 #if TG3_TSO_SUPPORT != 0
5101
5102 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
5103 #define TG3_TSO_FW_RELASE_MINOR         0x6
5104 #define TG3_TSO_FW_RELEASE_FIX          0x0
5105 #define TG3_TSO_FW_START_ADDR           0x08000000
5106 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
5107 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
5108 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
5109 #define TG3_TSO_FW_RODATA_LEN           0x60
5110 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
5111 #define TG3_TSO_FW_DATA_LEN             0x30
5112 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
5113 #define TG3_TSO_FW_SBSS_LEN             0x2c
5114 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
5115 #define TG3_TSO_FW_BSS_LEN              0x894
5116
5117 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5118         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5119         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5120         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5121         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5122         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5123         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5124         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5125         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5126         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5127         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5128         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5129         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5130         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5131         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5132         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5133         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5134         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5135         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5136         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5137         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5138         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5139         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5140         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5141         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5142         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5143         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5144         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5145         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5146         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5147         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5148         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5149         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5150         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5151         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5152         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5153         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5154         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5155         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5156         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5157         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5158         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5159         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5160         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5161         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5162         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5163         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5164         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5165         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5166         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5167         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5168         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5169         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5170         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5171         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5172         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5173         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5174         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5175         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5176         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5177         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5178         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5179         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5180         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5181         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5182         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5183         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5184         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5185         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5186         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5187         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5188         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5189         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5190         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5191         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5192         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5193         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5194         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5195         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5196         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5197         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5198         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5199         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5200         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5201         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5202         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5203         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5204         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5205         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5206         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5207         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5208         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5209         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5210         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5211         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5212         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5213         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5214         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5215         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5216         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5217         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5218         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5219         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5220         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5221         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5222         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5223         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5224         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5225         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5226         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5227         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5228         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5229         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5230         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5231         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5232         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5233         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5234         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5235         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5236         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5237         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5238         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5239         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5240         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5241         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5242         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5243         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5244         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5245         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5246         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5247         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5248         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5249         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5250         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5251         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5252         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5253         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5254         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5255         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5256         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5257         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5258         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5259         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5260         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5261         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5262         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5263         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5264         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5265         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5266         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5267         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5268         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5269         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5270         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5271         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5272         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5273         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5274         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5275         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5276         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5277         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5278         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5279         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5280         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5281         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5282         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5283         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5284         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5285         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5286         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5287         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5288         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5289         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5290         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5291         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5292         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5293         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5294         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5295         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5296         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5297         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5298         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5299         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5300         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5301         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5302         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5303         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5304         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5305         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5306         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5307         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5308         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5309         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5310         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5311         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5312         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5313         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5314         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5315         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5316         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5317         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5318         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5319         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5320         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5321         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5322         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5323         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5324         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5325         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5326         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5327         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5328         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5329         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5330         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5331         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5332         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5333         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5334         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5335         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5336         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5337         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5338         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5339         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5340         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5341         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5342         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5343         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5344         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5345         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5346         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5347         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5348         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5349         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5350         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5351         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5352         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5353         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5354         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5355         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5356         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5357         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5358         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5359         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5360         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5361         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5362         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5363         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5364         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5365         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5366         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5367         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5368         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5369         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5370         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5371         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5372         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5373         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5374         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5375         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5376         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5377         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5378         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5379         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5380         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5381         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5382         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5383         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5384         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5385         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5386         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5387         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5388         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5389         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5390         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5391         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5392         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5393         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5394         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5395         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5396         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5397         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5398         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5399         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5400         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5401         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5402 };
5403
5404 static u32 tg3TsoFwRodata[] = {
5405         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5406         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5407         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5408         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
5409         0x00000000,
5410 };
5411
5412 static u32 tg3TsoFwData[] = {
5413         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5414         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5415         0x00000000,
5416 };
5417
5418 /* 5705 needs a special version of the TSO firmware.  */
5419 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
5420 #define TG3_TSO5_FW_RELEASE_MINOR       0x2
5421 #define TG3_TSO5_FW_RELEASE_FIX         0x0
5422 #define TG3_TSO5_FW_START_ADDR          0x00010000
5423 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
5424 #define TG3_TSO5_FW_TEXT_LEN            0xe90
5425 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
5426 #define TG3_TSO5_FW_RODATA_LEN          0x50
5427 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
5428 #define TG3_TSO5_FW_DATA_LEN            0x20
5429 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
5430 #define TG3_TSO5_FW_SBSS_LEN            0x28
5431 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
5432 #define TG3_TSO5_FW_BSS_LEN             0x88
5433
5434 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5435         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5436         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5437         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5438         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5439         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5440         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5441         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5442         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5443         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5444         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5445         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5446         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5447         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5448         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5449         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5450         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5451         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5452         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5453         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5454         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5455         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5456         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5457         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5458         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5459         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5460         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5461         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5462         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5463         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5464         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5465         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5466         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5467         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5468         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5469         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5470         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5471         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
5472         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
5473         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
5474         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
5475         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
5476         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
5477         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
5478         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
5479         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
5480         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
5481         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
5482         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
5483         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
5484         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
5485         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
5486         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
5487         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
5488         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
5489         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
5490         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
5491         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
5492         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
5493         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
5494         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
5495         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
5496         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
5497         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
5498         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
5499         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
5500         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
5501         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5502         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
5503         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
5504         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
5505         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
5506         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
5507         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
5508         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
5509         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
5510         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
5511         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
5512         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
5513         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
5514         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
5515         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
5516         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
5517         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
5518         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
5519         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
5520         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
5521         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
5522         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
5523         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
5524         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
5525         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
5526         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
5527         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
5528         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
5529         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
5530         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
5531         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
5532         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
5533         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
5534         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
5535         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
5536         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
5537         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
5538         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
5539         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
5540         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
5541         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5542         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5543         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
5544         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
5545         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
5546         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
5547         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
5548         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
5549         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
5550         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
5551         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
5552         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5553         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5554         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
5555         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
5556         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
5557         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
5558         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5559         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
5560         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
5561         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
5562         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
5563         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
5564         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
5565         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
5566         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
5567         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
5568         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
5569         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
5570         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
5571         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
5572         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
5573         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
5574         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
5575         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
5576         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
5577         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
5578         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
5579         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
5580         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
5581         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
5582         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5583         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
5584         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
5585         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
5586         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5587         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
5588         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
5589         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5590         0x00000000, 0x00000000, 0x00000000,
5591 };
5592
5593 static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
5594         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5595         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
5596         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5597         0x00000000, 0x00000000, 0x00000000,
5598 };
5599
5600 static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
5601         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
5602         0x00000000, 0x00000000, 0x00000000,
5603 };
5604
5605 /* tp->lock is held. */
5606 static int tg3_load_tso_firmware(struct tg3 *tp)
5607 {
5608         struct fw_info info;
5609         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
5610         int err, i;
5611
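             /* Chips with TSO support in hardware need no firmware download. */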
5612         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5613                 return 0;
5614
5615         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5616                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
5617                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
5618                 info.text_data = &tg3Tso5FwText[0];
5619                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
5620                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
5621                 info.rodata_data = &tg3Tso5FwRodata[0];
5622                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
5623                 info.data_len = TG3_TSO5_FW_DATA_LEN;
5624                 info.data_data = &tg3Tso5FwData[0];
5625                 cpu_base = RX_CPU_BASE;
5626                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
5627                 cpu_scratch_size = (info.text_len +
5628                                     info.rodata_len +
5629                                     info.data_len +
5630                                     TG3_TSO5_FW_SBSS_LEN +
5631                                     TG3_TSO5_FW_BSS_LEN);
5632         } else {
5633                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
5634                 info.text_len = TG3_TSO_FW_TEXT_LEN;
5635                 info.text_data = &tg3TsoFwText[0];
5636                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
5637                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
5638                 info.rodata_data = &tg3TsoFwRodata[0];
5639                 info.data_base = TG3_TSO_FW_DATA_ADDR;
5640                 info.data_len = TG3_TSO_FW_DATA_LEN;
5641                 info.data_data = &tg3TsoFwData[0];
5642                 cpu_base = TX_CPU_BASE;
5643                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
5644                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
5645         }
5646
5647         err = tg3_load_firmware_cpu(tp, cpu_base,
5648                                     cpu_scratch_base, cpu_scratch_size,
5649                                     &info);
5650         if (err)
5651                 return err;
5652
5653         /* Now startup the cpu. */
5654         tw32(cpu_base + CPU_STATE, 0xffffffff);
5655         tw32_f(cpu_base + CPU_PC,    info.text_base);
5656
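             /* Verify that the PC write took effect, retrying a few times
              * before giving up.
              */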
5657         for (i = 0; i < 5; i++) {
5658                 if (tr32(cpu_base + CPU_PC) == info.text_base)
5659                         break;
5660                 tw32(cpu_base + CPU_STATE, 0xffffffff);
5661                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
5662                 tw32_f(cpu_base + CPU_PC,    info.text_base);
5663                 udelay(1000);
5664         }
5665         if (i >= 5) {
5666                 printk(KERN_ERR PFX "tg3_load_tso_firmware failed to set CPU PC "
5667                        "for %s: is %08x, should be %08x\n",
5668                        tp->dev->name, tr32(cpu_base + CPU_PC),
5669                        info.text_base);
5670                 return -ENODEV;
5671         }
5672         tw32(cpu_base + CPU_STATE, 0xffffffff);
5673         tw32_f(cpu_base + CPU_MODE,  0x00000000);
5674         return 0;
5675 }
5676
5677 #endif /* TG3_TSO_SUPPORT != 0 */
5678
5679 /* tp->lock is held. */
5680 static void __tg3_set_mac_addr(struct tg3 *tp)
5681 {
5682         u32 addr_high, addr_low;
5683         int i;
5684
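             /* Split the station address into a 16-bit high half and a
              * 32-bit low half and write it to all four MAC address slots.
              */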
5685         addr_high = ((tp->dev->dev_addr[0] << 8) |
5686                      tp->dev->dev_addr[1]);
5687         addr_low = ((tp->dev->dev_addr[2] << 24) |
5688                     (tp->dev->dev_addr[3] << 16) |
5689                     (tp->dev->dev_addr[4] <<  8) |
5690                     (tp->dev->dev_addr[5] <<  0));
5691         for (i = 0; i < 4; i++) {
5692                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5693                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5694         }
5695
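             /* 5703 and 5704 have twelve additional MAC address slots;
              * program the same address into those as well.
              */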
5696         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5697             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5698                 for (i = 0; i < 12; i++) {
5699                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5700                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5701                 }
5702         }
5703
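             /* Seed the transmit backoff generator from the MAC address so
              * that different NICs are unlikely to pick the same backoff
              * slots.
              */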
5704         addr_high = (tp->dev->dev_addr[0] +
5705                      tp->dev->dev_addr[1] +
5706                      tp->dev->dev_addr[2] +
5707                      tp->dev->dev_addr[3] +
5708                      tp->dev->dev_addr[4] +
5709                      tp->dev->dev_addr[5]) &
5710                 TX_BACKOFF_SEED_MASK;
5711         tw32(MAC_TX_BACKOFF_SEED, addr_high);
5712 }
5713
5714 static int tg3_set_mac_addr(struct net_device *dev, void *p)
5715 {
5716         struct tg3 *tp = netdev_priv(dev);
5717         struct sockaddr *addr = p;
5718
5719         if (!is_valid_ether_addr(addr->sa_data))
5720                 return -EINVAL;
5721
5722         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5723
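             /* If the interface is down, the new address is simply stored;
              * it will be written to the hardware on the next open.
              */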
5724         if (!netif_running(dev))
5725                 return 0;
5726
5727         spin_lock_bh(&tp->lock);
5728         __tg3_set_mac_addr(tp);
5729         spin_unlock_bh(&tp->lock);
5730
5731         return 0;
5732 }
5733
5734 /* tp->lock is held. */
5735 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5736                            dma_addr_t mapping, u32 maxlen_flags,
5737                            u32 nic_addr)
5738 {
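             /* Each TG3_BDINFO block in NIC SRAM describes one ring: the
              * 64-bit host DMA address (high and low words), a maxlen/flags
              * word and, on pre-5705 chips, the ring's address in NIC memory.
              */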
5739         tg3_write_mem(tp,
5740                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5741                       ((u64) mapping >> 32));
5742         tg3_write_mem(tp,
5743                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5744                       ((u64) mapping & 0xffffffff));
5745         tg3_write_mem(tp,
5746                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5747                        maxlen_flags);
5748
5749         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5750                 tg3_write_mem(tp,
5751                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5752                               nic_addr);
5753 }
5754
5755 static void __tg3_set_rx_mode(struct net_device *);
5756 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
5757 {
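             /* The per-interrupt tick and statistics-block coalescing
              * registers are only programmed on pre-5705 chips.
              */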
5758         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5759         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5760         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5761         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
5762         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5763                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5764                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5765         }
5766         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5767         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
5768         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5769                 u32 val = ec->stats_block_coalesce_usecs;
5770
5771                 if (!netif_carrier_ok(tp->dev))
5772                         val = 0;
5773
5774                 tw32(HOSTCC_STAT_COAL_TICKS, val);
5775         }
5776 }
5777
5778 /* tp->lock is held. */
5779 static int tg3_reset_hw(struct tg3 *tp)
5780 {
5781         u32 val, rdmac_mode;
5782         int i, err, limit;
5783
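             /* Quiesce the chip first: mask interrupts, stop the on-chip
              * firmware, post the pre-reset signature and abort any DMA in
              * flight before issuing the core reset.
              */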
5784         tg3_disable_ints(tp);
5785
5786         tg3_stop_fw(tp);
5787
5788         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5789
5790         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
5791                 tg3_abort_hw(tp, 1);
5792         }
5793
5794         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
5795                 tg3_phy_reset(tp);
5796
5797         err = tg3_chip_reset(tp);
5798         if (err)
5799                 return err;
5800
5801         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5802
5803         /* This works around an issue with Athlon chipsets on
5804          * B3 tigon3 silicon.  This bit has no effect on any
5805          * other revision.  But do not set this on PCI Express
5806          * chips.
5807          */
5808         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5809                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5810         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5811
5812         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5813             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5814                 val = tr32(TG3PCI_PCISTATE);
5815                 val |= PCISTATE_RETRY_SAME_DMA;
5816                 tw32(TG3PCI_PCISTATE, val);
5817         }
5818
5819         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5820                 /* Enable some hw fixes.  */
5821                 val = tr32(TG3PCI_MSI_DATA);
5822                 val |= (1 << 26) | (1 << 28) | (1 << 29);
5823                 tw32(TG3PCI_MSI_DATA, val);
5824         }
5825
5826         /* Descriptor ring init may make accesses to the
5827          * NIC SRAM area to setup the TX descriptors, so we
5828          * can only do this after the hardware has been
5829          * successfully reset.
5830          */
5831         tg3_init_rings(tp);
5832
5833         /* This value is determined during the probe time DMA
5834          * engine test, tg3_test_dma.
5835          */
5836         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5837
5838         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5839                           GRC_MODE_4X_NIC_SEND_RINGS |
5840                           GRC_MODE_NO_TX_PHDR_CSUM |
5841                           GRC_MODE_NO_RX_PHDR_CSUM);
5842         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
5843         if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
5844                 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
5845         if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
5846                 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
5847
5848         tw32(GRC_MODE,
5849              tp->grc_mode |
5850              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5851
5852         /* Set up the timer prescaler register.  The clock is always
              * 66 MHz; the value 65 below divides it by 66, yielding a
              * 1 MHz (1 usec) timer tick.
              */
5853         val = tr32(GRC_MISC_CFG);
5854         val &= ~0xff;
5855         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5856         tw32(GRC_MISC_CFG, val);
5857
5858         /* Initialize MBUF/DESC pool. */
5859         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
5860                 /* Do nothing.  */
5861         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5862                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5863                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5864                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5865                 else
5866                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5867                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5868                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5869         }
5870 #if TG3_TSO_SUPPORT != 0
5871         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5872                 int fw_len;
5873
5874                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5875                           TG3_TSO5_FW_RODATA_LEN +
5876                           TG3_TSO5_FW_DATA_LEN +
5877                           TG3_TSO5_FW_SBSS_LEN +
5878                           TG3_TSO5_FW_BSS_LEN);
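                     /* Round the firmware footprint up to a 128-byte
                      * boundary and carve it out of the front of the 5705
                      * MBUF pool.
                      */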
5879                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
5880                 tw32(BUFMGR_MB_POOL_ADDR,
5881                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5882                 tw32(BUFMGR_MB_POOL_SIZE,
5883                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
5884         }
5885 #endif
5886
5887         if (tp->dev->mtu <= ETH_DATA_LEN) {
5888                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5889                      tp->bufmgr_config.mbuf_read_dma_low_water);
5890                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5891                      tp->bufmgr_config.mbuf_mac_rx_low_water);
5892                 tw32(BUFMGR_MB_HIGH_WATER,
5893                      tp->bufmgr_config.mbuf_high_water);
5894         } else {
5895                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5896                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
5897                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5898                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
5899                 tw32(BUFMGR_MB_HIGH_WATER,
5900                      tp->bufmgr_config.mbuf_high_water_jumbo);
5901         }
5902         tw32(BUFMGR_DMA_LOW_WATER,
5903              tp->bufmgr_config.dma_low_water);
5904         tw32(BUFMGR_DMA_HIGH_WATER,
5905              tp->bufmgr_config.dma_high_water);
5906
5907         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
5908         for (i = 0; i < 2000; i++) {
5909                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
5910                         break;
5911                 udelay(10);
5912         }
5913         if (i >= 2000) {
5914                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5915                        tp->dev->name);
5916                 return -ENODEV;
5917         }
5918
5919         /* Setup replenish threshold. */
5920         tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
5921
5922         /* Initialize TG3_BDINFO's at:
5923          *  RCVDBDI_STD_BD:     standard eth size rx ring
5924          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
5925          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
5926          *
5927          * like so:
5928          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
5929          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
5930          *                              ring attribute flags
5931          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
5932          *
5933          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5934          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5935          *
5936          * The size of each ring is fixed in the firmware, but the location is
5937          * configurable.
5938          */
5939         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5940              ((u64) tp->rx_std_mapping >> 32));
5941         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5942              ((u64) tp->rx_std_mapping & 0xffffffff));
5943         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
5944              NIC_SRAM_RX_BUFFER_DESC);
5945
5946         /* Don't even try to program the JUMBO/MINI buffer descriptor
5947          * configs on 5705.
5948          */
5949         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5950                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5951                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
5952         } else {
5953                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5954                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5955
5956                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
5957                      BDINFO_FLAGS_DISABLED);
5958
5959                 /* Setup replenish threshold. */
5960                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
5961
5962                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5963                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5964                              ((u64) tp->rx_jumbo_mapping >> 32));
5965                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5966                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
5967                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5968                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5969                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
5970                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
5971                 } else {
5972                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5973                              BDINFO_FLAGS_DISABLED);
5974                 }
5975
5976         }
5977
5978         /* There is only one send ring on 5705/5750, no need to explicitly
5979          * disable the others.
5980          */
5981         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5982                 /* Clear out send RCB ring in SRAM. */
5983                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
5984                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5985                                       BDINFO_FLAGS_DISABLED);
5986         }
5987
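             /* Reset the send ring indices and zero both the host and NIC
              * send producer mailboxes.
              */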
5988         tp->tx_prod = 0;
5989         tp->tx_cons = 0;
5990         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5991         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5992
5993         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
5994                        tp->tx_desc_mapping,
5995                        (TG3_TX_RING_SIZE <<
5996                         BDINFO_FLAGS_MAXLEN_SHIFT),
5997                        NIC_SRAM_TX_BUFFER_DESC);
5998
5999         /* There is only one receive return ring on 5705/5750, no need
6000          * to explicitly disable the others.
6001          */
6002         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6003                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6004                      i += TG3_BDINFO_SIZE) {
6005                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6006                                       BDINFO_FLAGS_DISABLED);
6007                 }
6008         }
6009
6010         tp->rx_rcb_ptr = 0;
6011         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6012
6013         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6014                        tp->rx_rcb_mapping,
6015                        (TG3_RX_RCB_RING_SIZE(tp) <<
6016                         BDINFO_FLAGS_MAXLEN_SHIFT),
6017                        0);
6018
6019         tp->rx_std_ptr = tp->rx_pending;
6020         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6021                      tp->rx_std_ptr);
6022
6023         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
6024                                                 tp->rx_jumbo_pending : 0;
6025         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6026                      tp->rx_jumbo_ptr);
6027
6028         /* Initialize MAC address and backoff seed. */
6029         __tg3_set_mac_addr(tp);
6030
6031         /* MTU + ethernet header (ETH_HLEN) + 4-byte FCS + optional 4-byte VLAN tag */
6032         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6033
6034         /* The slot time is changed by tg3_setup_phy if we
6035          * run at gigabit with half duplex.
6036          */
6037         tw32(MAC_TX_LENGTHS,
6038              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6039              (6 << TX_LENGTHS_IPG_SHIFT) |
6040              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6041
6042         /* Receive rules. */
6043         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6044         tw32(RCVLPC_CONFIG, 0x0181);
6045
6046         /* Calculate the RDMAC_MODE setting early; it is needed to
6047          * determine the RCVLPC_STATS_ENABLE mask below.
6048          */
6049         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6050                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6051                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6052                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6053                       RDMAC_MODE_LNGREAD_ENAB);
6054         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6055                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
6056
6057         /* If statement applies to 5705 and 5750 PCI devices only */
6058         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6059              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6060             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
6061                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6062                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6063                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6064                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6065                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6066                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6067                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6068                 }
6069         }
6070
6071         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6072                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6073
6074 #if TG3_TSO_SUPPORT != 0
6075         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6076                 rdmac_mode |= (1 << 27);
6077 #endif
6078
6079         /* Receive/send statistics. */
6080         if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6081             (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6082                 val = tr32(RCVLPC_STATS_ENABLE);
6083                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6084                 tw32(RCVLPC_STATS_ENABLE, val);
6085         } else {
6086                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6087         }
6088         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6089         tw32(SNDDATAI_STATSENAB, 0xffffff);
6090         tw32(SNDDATAI_STATSCTRL,
6091              (SNDDATAI_SCTRL_ENABLE |
6092               SNDDATAI_SCTRL_FASTUPD));
6093
6094         /* Setup host coalescing engine. */
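             /* Disable the engine and poll until the enable bit clears
              * (up to ~20 ms) before reprogramming it.
              */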
6095         tw32(HOSTCC_MODE, 0);
6096         for (i = 0; i < 2000; i++) {
6097                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6098                         break;
6099                 udelay(10);
6100         }
6101
6102         __tg3_set_coalesce(tp, &tp->coal);
6103
6104         /* set status block DMA address */
6105         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6106              ((u64) tp->status_mapping >> 32));
6107         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6108              ((u64) tp->status_mapping & 0xffffffff));
6109
6110         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6111                 /* Status/statistics block address.  See tg3_timer,
6112                  * the tg3_periodic_fetch_stats call there, and
6113                  * tg3_get_stats to see how this works for 5705/5750 chips.
6114                  */
6115                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6116                      ((u64) tp->stats_mapping >> 32));
6117                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6118                      ((u64) tp->stats_mapping & 0xffffffff));
6119                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6120                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6121         }
6122
6123         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6124
6125         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6126         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6127         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6128                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6129
6130         /* Clear statistics/status block in chip, and status block in ram. */
6131         for (i = NIC_SRAM_STATS_BLK;
6132              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6133              i += sizeof(u32)) {
6134                 tg3_write_mem(tp, i, 0);
6135                 udelay(40);
6136         }
6137         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6138
6139         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6140                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6141                 /* reset to prevent losing 1st rx packet intermittently */
6142                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6143                 udelay(10);
6144         }
6145
6146         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6147                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6148         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6149         udelay(40);
6150
6151         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6152          * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
6153          * register to preserve the GPIO settings for LOMs. The GPIOs,
6154          * whether used as inputs or outputs, are set by boot code after
6155          * reset.
6156          */
6157         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
6158                 u32 gpio_mask;
6159
6160                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
6161                             GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
6162
6163                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6164                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6165                                      GRC_LCLCTRL_GPIO_OUTPUT3;
6166
6167                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6168                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6169
6170                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6171
6172                 /* GPIO1 must be driven high for eeprom write protect */
6173                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6174                                        GRC_LCLCTRL_GPIO_OUTPUT1);
6175         }
6176         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6177         udelay(100);
6178
6179         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
6180         tp->last_tag = 0;
6181
6182         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6183                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6184                 udelay(40);
6185         }
6186
6187         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6188                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6189                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6190                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6191                WDMAC_MODE_LNGREAD_ENAB);
6192
6193         /* If statement applies to 5705 and 5750 PCI devices only */
6194         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6195              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6196             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6197                 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
6198                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6199                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6200                         /* nothing */
6201                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6202                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6203                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6204                         val |= WDMAC_MODE_RX_ACCEL;
6205                 }
6206         }
6207
6208         /* Enable host coalescing bug fix */
6209         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
6210             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787))
6211                 val |= (1 << 29);
6212
6213         tw32_f(WDMAC_MODE, val);
6214         udelay(40);
6215
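             /* In PCI-X mode, raise the maximum burst size; on 5704 also
              * program the split transaction limit when split mode is
              * enabled.
              */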
6216         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
6217                 val = tr32(TG3PCI_X_CAPS);
6218                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6219                         val &= ~PCIX_CAPS_BURST_MASK;
6220                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6221                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6222                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
6223                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6224                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6225                                 val |= (tp->split_mode_max_reqs <<
6226                                         PCIX_CAPS_SPLIT_SHIFT);
6227                 }
6228                 tw32(TG3PCI_X_CAPS, val);
6229         }
6230
6231         tw32_f(RDMAC_MODE, rdmac_mode);
6232         udelay(40);
6233
6234         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6235         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6236                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6237         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6238         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6239         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6240         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6241         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6242 #if TG3_TSO_SUPPORT != 0
6243         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6244                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6245 #endif
6246         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6247         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6248
6249         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6250                 err = tg3_load_5701_a0_firmware_fix(tp);
6251                 if (err)
6252                         return err;
6253         }
6254
6255 #if TG3_TSO_SUPPORT != 0
6256         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6257                 err = tg3_load_tso_firmware(tp);
6258                 if (err)
6259                         return err;
6260         }
6261 #endif
6262
6263         tp->tx_mode = TX_MODE_ENABLE;
6264         tw32_f(MAC_TX_MODE, tp->tx_mode);
6265         udelay(100);
6266
6267         tp->rx_mode = RX_MODE_ENABLE;
6268         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6269                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
6270
6271         tw32_f(MAC_RX_MODE, tp->rx_mode);
6272         udelay(10);
6273
6274         if (tp->link_config.phy_is_low_power) {
6275                 tp->link_config.phy_is_low_power = 0;
6276                 tp->link_config.speed = tp->link_config.orig_speed;
6277                 tp->link_config.duplex = tp->link_config.orig_duplex;
6278                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6279         }
6280
6281         tp->mi_mode = MAC_MI_MODE_BASE;
6282         tw32_f(MAC_MI_MODE, tp->mi_mode);
6283         udelay(80);
6284
6285         tw32(MAC_LED_CTRL, tp->led_ctrl);
6286
6287         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
6288         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6289                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6290                 udelay(10);
6291         }
6292         tw32_f(MAC_RX_MODE, tp->rx_mode);
6293         udelay(10);
6294
6295         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6296                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6297                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6298                         /* Set drive transmission level to 1.2V only if
6299                          * the signal pre-emphasis bit is not set.  */
6300                         val = tr32(MAC_SERDES_CFG);
6301                         val &= 0xfffff000;
6302                         val |= 0x880;
6303                         tw32(MAC_SERDES_CFG, val);
6304                 }
6305                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6306                         tw32(MAC_SERDES_CFG, 0x616000);
6307         }
6308
6309         /* Prevent chip from dropping frames when flow control
6310          * is enabled.
6311          */
6312         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6313
6314         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6315             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6316                 /* Use hardware link auto-negotiation */
6317                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6318         }
6319
6320         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6321             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6322                 u32 tmp;
6323
6324                 tmp = tr32(SERDES_RX_CTRL);
6325                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6326                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6327                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6328                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6329         }
6330
6331         err = tg3_setup_phy(tp, 1);
6332         if (err)
6333                 return err;
6334
6335         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6336                 u32 tmp;
6337
6338                 /* Clear CRC stats. */
6339                 if (!tg3_readphy(tp, 0x1e, &tmp)) {
6340                         tg3_writephy(tp, 0x1e, tmp | 0x8000);
6341                         tg3_readphy(tp, 0x14, &tmp);
6342                 }
6343         }
6344
6345         __tg3_set_rx_mode(tp->dev);
6346
6347         /* Initialize receive rules. */
6348         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
6349         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6350         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
6351         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6352
6353         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6354             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6355                 limit = 8;
6356         else
6357                 limit = 16;
6358         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6359                 limit -= 4;
6360         switch (limit) {
6361         case 16:
6362                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
6363         case 15:
6364                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
6365         case 14:
6366                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
6367         case 13:
6368                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
6369         case 12:
6370                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
6371         case 11:
6372                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
6373         case 10:
6374                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
6375         case 9:
6376                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
6377         case 8:
6378                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
6379         case 7:
6380                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
6381         case 6:
6382                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
6383         case 5:
6384                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
6385         case 4:
6386                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
6387         case 3:
6388                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
6389         case 2:
6390         case 1:
6391
6392         default:
6393                 break;
6394         }
6395
6396         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6397
6398         return 0;
6399 }
6400
6401 /* Called at device open time to get the chip ready for
6402  * packet processing.  Invoked with tp->lock held.
6403  */
6404 static int tg3_init_hw(struct tg3 *tp)
6405 {
6406         int err;
6407
6408         /* Force the chip into D0. */
6409         err = tg3_set_power_state(tp, PCI_D0);
6410         if (err)
6411                 goto out;
6412
6413         tg3_switch_clocks(tp);
6414
6415         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6416
6417         err = tg3_reset_hw(tp);
6418
6419 out:
6420         return err;
6421 }
6422
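     /* Fold a 32-bit hardware counter into a 64-bit { high, low } software
      * counter.  The unsigned compare detects wrap of the low word: if adding
      * __val overflowed it, the sum ends up smaller than __val (for example
      * 0xfffffff0 + 0x20 = 0x10), so the high word is incremented.
      */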
6423 #define TG3_STAT_ADD32(PSTAT, REG) \
6424 do {    u32 __val = tr32(REG); \
6425         (PSTAT)->low += __val; \
6426         if ((PSTAT)->low < __val) \
6427                 (PSTAT)->high += 1; \
6428 } while (0)
6429
6430 static void tg3_periodic_fetch_stats(struct tg3 *tp)
6431 {
6432         struct tg3_hw_stats *sp = tp->hw_stats;
6433
6434         if (!netif_carrier_ok(tp->dev))
6435                 return;
6436
6437         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
6438         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
6439         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
6440         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
6441         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
6442         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
6443         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
6444         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
6445         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
6446         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
6447         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
6448         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
6449         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
6450
6451         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
6452         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
6453         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
6454         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
6455         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
6456         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
6457         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
6458         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
6459         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
6460         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
6461         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
6462         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
6463         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
6464         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
6465 }
6466
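     /* Driver maintenance timer.  tg3_open() arms it with a period of HZ
      * (tagged status) or HZ/10 (non-tagged status, to work around the
      * interrupt race handled below) and scales timer_multiplier and
      * asf_multiplier accordingly, so the countdowns here still run the
      * statistics/link poll once per second and the ASF heartbeat once
      * every two seconds.
      */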
6467 static void tg3_timer(unsigned long __opaque)
6468 {
6469         struct tg3 *tp = (struct tg3 *) __opaque;
6470
6471         spin_lock(&tp->lock);
6472
6473         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6474                 /* All of this garbage is because, when using non-tagged
6475                  * IRQ status, the mailbox/status_block protocol the chip
6476                  * uses with the CPU is race prone.
6477                  */
6478                 if (tp->hw_status->status & SD_STATUS_UPDATED) {
6479                         tw32(GRC_LOCAL_CTRL,
6480                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
6481                 } else {
6482                         tw32(HOSTCC_MODE, tp->coalesce_mode |
6483                              (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
6484                 }
6485
6486                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
6487                         tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
6488                         spin_unlock(&tp->lock);
6489                         schedule_work(&tp->reset_task);
6490                         return;
6491                 }
6492         }
6493
6494         /* This part only runs once per second. */
6495         if (!--tp->timer_counter) {
6496                 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6497                         tg3_periodic_fetch_stats(tp);
6498
6499                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
6500                         u32 mac_stat;
6501                         int phy_event;
6502
6503                         mac_stat = tr32(MAC_STATUS);
6504
6505                         phy_event = 0;
6506                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
6507                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
6508                                         phy_event = 1;
6509                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
6510                                 phy_event = 1;
6511
6512                         if (phy_event)
6513                                 tg3_setup_phy(tp, 0);
6514                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
6515                         u32 mac_stat = tr32(MAC_STATUS);
6516                         int need_setup = 0;
6517
6518                         if (netif_carrier_ok(tp->dev) &&
6519                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
6520                                 need_setup = 1;
6521                         }
6522                         if (!netif_carrier_ok(tp->dev) &&
6523                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
6524                                          MAC_STATUS_SIGNAL_DET))) {
6525                                 need_setup = 1;
6526                         }
6527                         if (need_setup) {
6528                                 tw32_f(MAC_MODE,
6529                                      (tp->mac_mode &
6530                                       ~MAC_MODE_PORT_MODE_MASK));
6531                                 udelay(40);
6532                                 tw32_f(MAC_MODE, tp->mac_mode);
6533                                 udelay(40);
6534                                 tg3_setup_phy(tp, 0);
6535                         }
6536                 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
6537                         tg3_serdes_parallel_detect(tp);
6538
6539                 tp->timer_counter = tp->timer_multiplier;
6540         }
6541
6542         /* Heartbeat is only sent once every 2 seconds.  */
6543         if (!--tp->asf_counter) {
6544                 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6545                         u32 val;
6546
6547                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
6548                                       FWCMD_NICDRV_ALIVE2);
6549                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
6550                         /* 5 seconds timeout */
6551                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
6552                         val = tr32(GRC_RX_CPU_EVENT);
6553                         val |= (1 << 14);
6554                         tw32(GRC_RX_CPU_EVENT, val);
6555                 }
6556                 tp->asf_counter = tp->asf_multiplier;
6557         }
6558
6559         spin_unlock(&tp->lock);
6560
6561         tp->timer.expires = jiffies + tp->timer_offset;
6562         add_timer(&tp->timer);
6563 }
6564
6565 static int tg3_request_irq(struct tg3 *tp)
6566 {
6567         irqreturn_t (*fn)(int, void *, struct pt_regs *);
6568         unsigned long flags;
6569         struct net_device *dev = tp->dev;
6570
6571         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6572                 fn = tg3_msi;
6573                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
6574                         fn = tg3_msi_1shot;
6575                 flags = SA_SAMPLE_RANDOM;
6576         } else {
6577                 fn = tg3_interrupt;
6578                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6579                         fn = tg3_interrupt_tagged;
6580                 flags = SA_SHIRQ | SA_SAMPLE_RANDOM;
6581         }
6582         return request_irq(tp->pdev->irq, fn, flags, dev->name, dev);
6583 }
6584
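     /* Verify that the chip can actually raise an interrupt: temporarily
      * install tg3_test_isr(), force a host-coalescing "interrupt now"
      * event, and poll the interrupt mailbox for up to ~50ms (5 x 10ms)
      * for evidence that it fired before restoring the normal handler.
      */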
6585 static int tg3_test_interrupt(struct tg3 *tp)
6586 {
6587         struct net_device *dev = tp->dev;
6588         int err, i;
6589         u32 int_mbox = 0;
6590
6591         if (!netif_running(dev))
6592                 return -ENODEV;
6593
6594         tg3_disable_ints(tp);
6595
6596         free_irq(tp->pdev->irq, dev);
6597
6598         err = request_irq(tp->pdev->irq, tg3_test_isr,
6599                           SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
6600         if (err)
6601                 return err;
6602
6603         tp->hw_status->status &= ~SD_STATUS_UPDATED;
6604         tg3_enable_ints(tp);
6605
6606         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
6607                HOSTCC_MODE_NOW);
6608
6609         for (i = 0; i < 5; i++) {
6610                 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
6611                                         TG3_64BIT_REG_LOW);
6612                 if (int_mbox != 0)
6613                         break;
6614                 msleep(10);
6615         }
6616
6617         tg3_disable_ints(tp);
6618
6619         free_irq(tp->pdev->irq, dev);
6620         
6621         err = tg3_request_irq(tp);
6622
6623         if (err)
6624                 return err;
6625
6626         if (int_mbox != 0)
6627                 return 0;
6628
6629         return -EIO;
6630 }
6631
6632 /* Returns 0 if the MSI test succeeds, or if it fails but INTx mode is
6633  * successfully restored.
6634  */
6635 static int tg3_test_msi(struct tg3 *tp)
6636 {
6637         struct net_device *dev = tp->dev;
6638         int err;
6639         u16 pci_cmd;
6640
6641         if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
6642                 return 0;
6643
6644         /* Turn off SERR reporting in case MSI terminates with Master
6645          * Abort.
6646          */
6647         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
6648         pci_write_config_word(tp->pdev, PCI_COMMAND,
6649                               pci_cmd & ~PCI_COMMAND_SERR);
6650
6651         err = tg3_test_interrupt(tp);
6652
6653         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
6654
6655         if (!err)
6656                 return 0;
6657
6658         /* other failures */
6659         if (err != -EIO)
6660                 return err;
6661
6662         /* MSI test failed, go back to INTx mode */
6663         printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
6664                "switching to INTx mode. Please report this failure to "
6665                "the PCI maintainer and include system chipset information.\n",
6666                        tp->dev->name);
6667
6668         free_irq(tp->pdev->irq, dev);
6669         pci_disable_msi(tp->pdev);
6670
6671         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6672
6673         err = tg3_request_irq(tp);
6674         if (err)
6675                 return err;
6676
6677         /* Need to reset the chip because the MSI cycle may have terminated
6678          * with Master Abort.
6679          */
6680         tg3_full_lock(tp, 1);
6681
6682         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6683         err = tg3_init_hw(tp);
6684
6685         tg3_full_unlock(tp);
6686
6687         if (err)
6688                 free_irq(tp->pdev->irq, dev);
6689
6690         return err;
6691 }
6692
6693 static int tg3_open(struct net_device *dev)
6694 {
6695         struct tg3 *tp = netdev_priv(dev);
6696         int err;
6697
6698         tg3_full_lock(tp, 0);
6699
6700         err = tg3_set_power_state(tp, PCI_D0);
6701         if (err) {
6702                 tg3_full_unlock(tp);
                     return err;
             }
6703
6704         tg3_disable_ints(tp);
6705         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
6706
6707         tg3_full_unlock(tp);
6708
6709         /* The placement of this call is tied
6710          * to the setup and use of Host TX descriptors.
6711          */
6712         err = tg3_alloc_consistent(tp);
6713         if (err)
6714                 return err;
6715
6716         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
6717             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
6718             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX) &&
6719             !((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) &&
6720               (tp->pdev_peer == tp->pdev))) {
6721                 /* All MSI supporting chips should support tagged
6722                  * status.  Assert that this is the case.
6723                  */
6724                 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6725                         printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
6726                                "Not using MSI.\n", tp->dev->name);
6727                 } else if (pci_enable_msi(tp->pdev) == 0) {
6728                         u32 msi_mode;
6729
6730                         msi_mode = tr32(MSGINT_MODE);
6731                         tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
6732                         tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
6733                 }
6734         }
6735         err = tg3_request_irq(tp);
6736
6737         if (err) {
6738                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6739                         pci_disable_msi(tp->pdev);
6740                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6741                 }
6742                 tg3_free_consistent(tp);
6743                 return err;
6744         }
6745
6746         tg3_full_lock(tp, 0);
6747
6748         err = tg3_init_hw(tp);
6749         if (err) {
6750                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6751                 tg3_free_rings(tp);
6752         } else {
6753                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6754                         tp->timer_offset = HZ;
6755                 else
6756                         tp->timer_offset = HZ / 10;
6757
6758                 BUG_ON(tp->timer_offset > HZ);
6759                 tp->timer_counter = tp->timer_multiplier =
6760                         (HZ / tp->timer_offset);
6761                 tp->asf_counter = tp->asf_multiplier =
6762                         ((HZ / tp->timer_offset) * 2);
6763
6764                 init_timer(&tp->timer);
6765                 tp->timer.expires = jiffies + tp->timer_offset;
6766                 tp->timer.data = (unsigned long) tp;
6767                 tp->timer.function = tg3_timer;
6768         }
6769
6770         tg3_full_unlock(tp);
6771
6772         if (err) {
6773                 free_irq(tp->pdev->irq, dev);
6774                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6775                         pci_disable_msi(tp->pdev);
6776                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6777                 }
6778                 tg3_free_consistent(tp);
6779                 return err;
6780         }
6781
6782         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6783                 err = tg3_test_msi(tp);
6784
6785                 if (err) {
6786                         tg3_full_lock(tp, 0);
6787
6788                         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6789                                 pci_disable_msi(tp->pdev);
6790                                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6791                         }
6792                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6793                         tg3_free_rings(tp);
6794                         tg3_free_consistent(tp);
6795
6796                         tg3_full_unlock(tp);
6797
6798                         return err;
6799                 }
6800
6801                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6802                         if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
6803                                 u32 val = tr32(0x7c04);
6804
6805                                 tw32(0x7c04, val | (1 << 29));
6806                         }
6807                 }
6808         }
6809
6810         tg3_full_lock(tp, 0);
6811
6812         add_timer(&tp->timer);
6813         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
6814         tg3_enable_ints(tp);
6815
6816         tg3_full_unlock(tp);
6817
6818         netif_start_queue(dev);
6819
6820         return 0;
6821 }
6822
6823 #if 0
6824 /*static*/ void tg3_dump_state(struct tg3 *tp)
6825 {
6826         u32 val32, val32_2, val32_3, val32_4, val32_5;
6827         u16 val16;
6828         int i;
6829
6830         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
6831         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
6832         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
6833                val16, val32);
6834
6835         /* MAC block */
6836         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
6837                tr32(MAC_MODE), tr32(MAC_STATUS));
6838         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
6839                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
6840         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
6841                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
6842         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
6843                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
6844
6845         /* Send data initiator control block */
6846         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
6847                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
6848         printk("       SNDDATAI_STATSCTRL[%08x]\n",
6849                tr32(SNDDATAI_STATSCTRL));
6850
6851         /* Send data completion control block */
6852         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
6853
6854         /* Send BD ring selector block */
6855         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
6856                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
6857
6858         /* Send BD initiator control block */
6859         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
6860                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
6861
6862         /* Send BD completion control block */
6863         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
6864
6865         /* Receive list placement control block */
6866         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
6867                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
6868         printk("       RCVLPC_STATSCTRL[%08x]\n",
6869                tr32(RCVLPC_STATSCTRL));
6870
6871         /* Receive data and receive BD initiator control block */
6872         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
6873                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
6874
6875         /* Receive data completion control block */
6876         printk("DEBUG: RCVDCC_MODE[%08x]\n",
6877                tr32(RCVDCC_MODE));
6878
6879         /* Receive BD initiator control block */
6880         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
6881                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
6882
6883         /* Receive BD completion control block */
6884         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
6885                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
6886
6887         /* Receive list selector control block */
6888         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
6889                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
6890
6891         /* Mbuf cluster free block */
6892         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
6893                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
6894
6895         /* Host coalescing control block */
6896         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
6897                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
6898         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
6899                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6900                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6901         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
6902                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
6903                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
6904         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
6905                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
6906         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
6907                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
6908
6909         /* Memory arbiter control block */
6910         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
6911                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
6912
6913         /* Buffer manager control block */
6914         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
6915                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
6916         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
6917                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
6918         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
6919                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
6920                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
6921                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
6922
6923         /* Read DMA control block */
6924         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
6925                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
6926
6927         /* Write DMA control block */
6928         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
6929                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
6930
6931         /* DMA completion block */
6932         printk("DEBUG: DMAC_MODE[%08x]\n",
6933                tr32(DMAC_MODE));
6934
6935         /* GRC block */
6936         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
6937                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
6938         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
6939                tr32(GRC_LOCAL_CTRL));
6940
6941         /* TG3_BDINFOs */
6942         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
6943                tr32(RCVDBDI_JUMBO_BD + 0x0),
6944                tr32(RCVDBDI_JUMBO_BD + 0x4),
6945                tr32(RCVDBDI_JUMBO_BD + 0x8),
6946                tr32(RCVDBDI_JUMBO_BD + 0xc));
6947         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
6948                tr32(RCVDBDI_STD_BD + 0x0),
6949                tr32(RCVDBDI_STD_BD + 0x4),
6950                tr32(RCVDBDI_STD_BD + 0x8),
6951                tr32(RCVDBDI_STD_BD + 0xc));
6952         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
6953                tr32(RCVDBDI_MINI_BD + 0x0),
6954                tr32(RCVDBDI_MINI_BD + 0x4),
6955                tr32(RCVDBDI_MINI_BD + 0x8),
6956                tr32(RCVDBDI_MINI_BD + 0xc));
6957
6958         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
6959         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
6960         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
6961         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
6962         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
6963                val32, val32_2, val32_3, val32_4);
6964
6965         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
6966         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
6967         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
6968         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
6969         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
6970                val32, val32_2, val32_3, val32_4);
6971
6972         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
6973         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
6974         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
6975         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
6976         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
6977         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
6978                val32, val32_2, val32_3, val32_4, val32_5);
6979
6980         /* SW status block */
6981         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6982                tp->hw_status->status,
6983                tp->hw_status->status_tag,
6984                tp->hw_status->rx_jumbo_consumer,
6985                tp->hw_status->rx_consumer,
6986                tp->hw_status->rx_mini_consumer,
6987                tp->hw_status->idx[0].rx_producer,
6988                tp->hw_status->idx[0].tx_consumer);
6989
6990         /* SW statistics block */
6991         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
6992                ((u32 *)tp->hw_stats)[0],
6993                ((u32 *)tp->hw_stats)[1],
6994                ((u32 *)tp->hw_stats)[2],
6995                ((u32 *)tp->hw_stats)[3]);
6996
6997         /* Mailboxes */
6998         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
6999                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
7000                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
7001                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
7002                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
7003
7004         /* NIC side send descriptors. */
7005         for (i = 0; i < 6; i++) {
7006                 unsigned long txd;
7007
7008                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
7009                         + (i * sizeof(struct tg3_tx_buffer_desc));
7010                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
7011                        i,
7012                        readl(txd + 0x0), readl(txd + 0x4),
7013                        readl(txd + 0x8), readl(txd + 0xc));
7014         }
7015
7016         /* NIC side RX descriptors. */
7017         for (i = 0; i < 6; i++) {
7018                 unsigned long rxd;
7019
7020                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
7021                         + (i * sizeof(struct tg3_rx_buffer_desc));
7022                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
7023                        i,
7024                        readl(rxd + 0x0), readl(rxd + 0x4),
7025                        readl(rxd + 0x8), readl(rxd + 0xc));
7026                 rxd += (4 * sizeof(u32));
7027                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
7028                        i,
7029                        readl(rxd + 0x0), readl(rxd + 0x4),
7030                        readl(rxd + 0x8), readl(rxd + 0xc));
7031         }
7032
7033         for (i = 0; i < 6; i++) {
7034                 unsigned long rxd;
7035
7036                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
7037                         + (i * sizeof(struct tg3_rx_buffer_desc));
7038                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
7039                        i,
7040                        readl(rxd + 0x0), readl(rxd + 0x4),
7041                        readl(rxd + 0x8), readl(rxd + 0xc));
7042                 rxd += (4 * sizeof(u32));
7043                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
7044                        i,
7045                        readl(rxd + 0x0), readl(rxd + 0x4),
7046                        readl(rxd + 0x8), readl(rxd + 0xc));
7047         }
7048 }
7049 #endif
7050
7051 static struct net_device_stats *tg3_get_stats(struct net_device *);
7052 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7053
7054 static int tg3_close(struct net_device *dev)
7055 {
7056         struct tg3 *tp = netdev_priv(dev);
7057
7058         /* Calling flush_scheduled_work() may deadlock because
7059          * linkwatch_event() may be on the workqueue and it will try to get
7060          * the rtnl_lock which we are holding.
7061          */
7062         while (tp->tg3_flags & TG3_FLAG_IN_RESET_TASK)
7063                 msleep(1);
7064
7065         netif_stop_queue(dev);
7066
7067         del_timer_sync(&tp->timer);
7068
7069         tg3_full_lock(tp, 1);
7070 #if 0
7071         tg3_dump_state(tp);
7072 #endif
7073
7074         tg3_disable_ints(tp);
7075
7076         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7077         tg3_free_rings(tp);
7078         tp->tg3_flags &=
7079                 ~(TG3_FLAG_INIT_COMPLETE |
7080                   TG3_FLAG_GOT_SERDES_FLOWCTL);
7081
7082         tg3_full_unlock(tp);
7083
7084         free_irq(tp->pdev->irq, dev);
7085         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7086                 pci_disable_msi(tp->pdev);
7087                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7088         }
7089
7090         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
7091                sizeof(tp->net_stats_prev));
7092         memcpy(&tp->estats_prev, tg3_get_estats(tp),
7093                sizeof(tp->estats_prev));
7094
7095         tg3_free_consistent(tp);
7096
7097         tg3_set_power_state(tp, PCI_D3hot);
7098
7099         netif_carrier_off(tp->dev);
7100
7101         return 0;
7102 }
7103
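     /* Return a 64-bit { high, low } statistics counter as an unsigned long.
      * net_device_stats fields are unsigned long, so on a 32-bit host only
      * the low word can be reported and the counters simply wrap at 2^32.
      */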
7104 static inline unsigned long get_stat64(tg3_stat64_t *val)
7105 {
7106         unsigned long ret;
7107
7108 #if (BITS_PER_LONG == 32)
7109         ret = val->low;
7110 #else
7111         ret = ((u64)val->high << 32) | ((u64)val->low);
7112 #endif
7113         return ret;
7114 }
7115
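     /* On 5700/5701 with a copper PHY the CRC error count is taken from the
      * PHY rather than from the MAC's rx_fcs_errors counter: writing bit 15
      * of MII register 0x1e apparently exposes the PHY's CRC counter in
      * register 0x14, and reading it also clears it (the same sequence is
      * used as "Clear CRC stats" in tg3_reset_hw()).
      */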
7116 static unsigned long calc_crc_errors(struct tg3 *tp)
7117 {
7118         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7119
7120         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7121             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7122              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
7123                 u32 val;
7124
7125                 spin_lock_bh(&tp->lock);
7126                 if (!tg3_readphy(tp, 0x1e, &val)) {
7127                         tg3_writephy(tp, 0x1e, val | 0x8000);
7128                         tg3_readphy(tp, 0x14, &val);
7129                 } else
7130                         val = 0;
7131                 spin_unlock_bh(&tp->lock);
7132
7133                 tp->phy_crc_errors += val;
7134
7135                 return tp->phy_crc_errors;
7136         }
7137
7138         return get_stat64(&hw_stats->rx_fcs_errors);
7139 }
7140
7141 #define ESTAT_ADD(member) \
7142         estats->member =        old_estats->member + \
7143                                 get_stat64(&hw_stats->member)
7144
7145 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
7146 {
7147         struct tg3_ethtool_stats *estats = &tp->estats;
7148         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
7149         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7150
7151         if (!hw_stats)
7152                 return old_estats;
7153
7154         ESTAT_ADD(rx_octets);
7155         ESTAT_ADD(rx_fragments);
7156         ESTAT_ADD(rx_ucast_packets);
7157         ESTAT_ADD(rx_mcast_packets);
7158         ESTAT_ADD(rx_bcast_packets);
7159         ESTAT_ADD(rx_fcs_errors);
7160         ESTAT_ADD(rx_align_errors);
7161         ESTAT_ADD(rx_xon_pause_rcvd);
7162         ESTAT_ADD(rx_xoff_pause_rcvd);
7163         ESTAT_ADD(rx_mac_ctrl_rcvd);
7164         ESTAT_ADD(rx_xoff_entered);
7165         ESTAT_ADD(rx_frame_too_long_errors);
7166         ESTAT_ADD(rx_jabbers);
7167         ESTAT_ADD(rx_undersize_packets);
7168         ESTAT_ADD(rx_in_length_errors);
7169         ESTAT_ADD(rx_out_length_errors);
7170         ESTAT_ADD(rx_64_or_less_octet_packets);
7171         ESTAT_ADD(rx_65_to_127_octet_packets);
7172         ESTAT_ADD(rx_128_to_255_octet_packets);
7173         ESTAT_ADD(rx_256_to_511_octet_packets);
7174         ESTAT_ADD(rx_512_to_1023_octet_packets);
7175         ESTAT_ADD(rx_1024_to_1522_octet_packets);
7176         ESTAT_ADD(rx_1523_to_2047_octet_packets);
7177         ESTAT_ADD(rx_2048_to_4095_octet_packets);
7178         ESTAT_ADD(rx_4096_to_8191_octet_packets);
7179         ESTAT_ADD(rx_8192_to_9022_octet_packets);
7180
7181         ESTAT_ADD(tx_octets);
7182         ESTAT_ADD(tx_collisions);
7183         ESTAT_ADD(tx_xon_sent);
7184         ESTAT_ADD(tx_xoff_sent);
7185         ESTAT_ADD(tx_flow_control);
7186         ESTAT_ADD(tx_mac_errors);
7187         ESTAT_ADD(tx_single_collisions);
7188         ESTAT_ADD(tx_mult_collisions);
7189         ESTAT_ADD(tx_deferred);
7190         ESTAT_ADD(tx_excessive_collisions);
7191         ESTAT_ADD(tx_late_collisions);
7192         ESTAT_ADD(tx_collide_2times);
7193         ESTAT_ADD(tx_collide_3times);
7194         ESTAT_ADD(tx_collide_4times);
7195         ESTAT_ADD(tx_collide_5times);
7196         ESTAT_ADD(tx_collide_6times);
7197         ESTAT_ADD(tx_collide_7times);
7198         ESTAT_ADD(tx_collide_8times);
7199         ESTAT_ADD(tx_collide_9times);
7200         ESTAT_ADD(tx_collide_10times);
7201         ESTAT_ADD(tx_collide_11times);
7202         ESTAT_ADD(tx_collide_12times);
7203         ESTAT_ADD(tx_collide_13times);
7204         ESTAT_ADD(tx_collide_14times);
7205         ESTAT_ADD(tx_collide_15times);
7206         ESTAT_ADD(tx_ucast_packets);
7207         ESTAT_ADD(tx_mcast_packets);
7208         ESTAT_ADD(tx_bcast_packets);
7209         ESTAT_ADD(tx_carrier_sense_errors);
7210         ESTAT_ADD(tx_discards);
7211         ESTAT_ADD(tx_errors);
7212
7213         ESTAT_ADD(dma_writeq_full);
7214         ESTAT_ADD(dma_write_prioq_full);
7215         ESTAT_ADD(rxbds_empty);
7216         ESTAT_ADD(rx_discards);
7217         ESTAT_ADD(rx_errors);
7218         ESTAT_ADD(rx_threshold_hit);
7219
7220         ESTAT_ADD(dma_readq_full);
7221         ESTAT_ADD(dma_read_prioq_full);
7222         ESTAT_ADD(tx_comp_queue_full);
7223
7224         ESTAT_ADD(ring_set_send_prod_index);
7225         ESTAT_ADD(ring_status_update);
7226         ESTAT_ADD(nic_irqs);
7227         ESTAT_ADD(nic_avoided_irqs);
7228         ESTAT_ADD(nic_tx_threshold_hit);
7229
7230         return estats;
7231 }
7232
7233 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
7234 {
7235         struct tg3 *tp = netdev_priv(dev);
7236         struct net_device_stats *stats = &tp->net_stats;
7237         struct net_device_stats *old_stats = &tp->net_stats_prev;
7238         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7239
7240         if (!hw_stats)
7241                 return old_stats;
7242
7243         stats->rx_packets = old_stats->rx_packets +
7244                 get_stat64(&hw_stats->rx_ucast_packets) +
7245                 get_stat64(&hw_stats->rx_mcast_packets) +
7246                 get_stat64(&hw_stats->rx_bcast_packets);
7247                 
7248         stats->tx_packets = old_stats->tx_packets +
7249                 get_stat64(&hw_stats->tx_ucast_packets) +
7250                 get_stat64(&hw_stats->tx_mcast_packets) +
7251                 get_stat64(&hw_stats->tx_bcast_packets);
7252
7253         stats->rx_bytes = old_stats->rx_bytes +
7254                 get_stat64(&hw_stats->rx_octets);
7255         stats->tx_bytes = old_stats->tx_bytes +
7256                 get_stat64(&hw_stats->tx_octets);
7257
7258         stats->rx_errors = old_stats->rx_errors +
7259                 get_stat64(&hw_stats->rx_errors);
7260         stats->tx_errors = old_stats->tx_errors +
7261                 get_stat64(&hw_stats->tx_errors) +
7262                 get_stat64(&hw_stats->tx_mac_errors) +
7263                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
7264                 get_stat64(&hw_stats->tx_discards);
7265
7266         stats->multicast = old_stats->multicast +
7267                 get_stat64(&hw_stats->rx_mcast_packets);
7268         stats->collisions = old_stats->collisions +
7269                 get_stat64(&hw_stats->tx_collisions);
7270
7271         stats->rx_length_errors = old_stats->rx_length_errors +
7272                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
7273                 get_stat64(&hw_stats->rx_undersize_packets);
7274
7275         stats->rx_over_errors = old_stats->rx_over_errors +
7276                 get_stat64(&hw_stats->rxbds_empty);
7277         stats->rx_frame_errors = old_stats->rx_frame_errors +
7278                 get_stat64(&hw_stats->rx_align_errors);
7279         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
7280                 get_stat64(&hw_stats->tx_discards);
7281         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
7282                 get_stat64(&hw_stats->tx_carrier_sense_errors);
7283
7284         stats->rx_crc_errors = old_stats->rx_crc_errors +
7285                 calc_crc_errors(tp);
7286
7287         stats->rx_missed_errors = old_stats->rx_missed_errors +
7288                 get_stat64(&hw_stats->rx_discards);
7289
7290         return stats;
7291 }
7292
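     /* Bit-at-a-time CRC-32 over a buffer, using the reflected Ethernet
      * polynomial 0xedb88320.  __tg3_set_rx_mode() below feeds each
      * multicast destination address through this to pick a bit in the
      * 128-bit hardware hash filter, which presumably mirrors the CRC the
      * MAC computes on receive.
      */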
7293 static inline u32 calc_crc(unsigned char *buf, int len)
7294 {
7295         u32 reg;
7296         u32 tmp;
7297         int j, k;
7298
7299         reg = 0xffffffff;
7300
7301         for (j = 0; j < len; j++) {
7302                 reg ^= buf[j];
7303
7304                 for (k = 0; k < 8; k++) {
7305                         tmp = reg & 0x01;
7306
7307                         reg >>= 1;
7308
7309                         if (tmp) {
7310                                 reg ^= 0xedb88320;
7311                         }
7312                 }
7313         }
7314
7315         return ~reg;
7316 }
7317
7318 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7319 {
7320         /* accept or reject all multicast frames */
7321         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7322         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7323         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7324         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
7325 }
7326
7327 static void __tg3_set_rx_mode(struct net_device *dev)
7328 {
7329         struct tg3 *tp = netdev_priv(dev);
7330         u32 rx_mode;
7331
7332         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
7333                                   RX_MODE_KEEP_VLAN_TAG);
7334
7335         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
7336          * flag clear.
7337          */
7338 #if TG3_VLAN_TAG_USED
7339         if (!tp->vlgrp &&
7340             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7341                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7342 #else
7343         /* By definition, VLAN is always disabled in this
7344          * case.
7345          */
7346         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7347                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7348 #endif
7349
7350         if (dev->flags & IFF_PROMISC) {
7351                 /* Promiscuous mode. */
7352                 rx_mode |= RX_MODE_PROMISC;
7353         } else if (dev->flags & IFF_ALLMULTI) {
7354                 /* Accept all multicast. */
7355                 tg3_set_multi(tp, 1);
7356         } else if (dev->mc_count < 1) {
7357                 /* Reject all multicast. */
7358                 tg3_set_multi(tp, 0);
7359         } else {
7360                 /* Accept one or more multicast(s). */
7361                 struct dev_mc_list *mclist;
7362                 unsigned int i;
7363                 u32 mc_filter[4] = { 0, };
7364                 u32 regidx;
7365                 u32 bit;
7366                 u32 crc;
7367
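                     /* The low 7 bits of the inverted CRC of each address
                      * pick one of the 128 hash filter bits: bits 6:5 select
                      * one of the four MAC_HASH registers, bits 4:0 the bit
                      * within that register.
                      */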
7368                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
7369                      i++, mclist = mclist->next) {
7370
7371                         crc = calc_crc(mclist->dmi_addr, ETH_ALEN);
7372                         bit = ~crc & 0x7f;
7373                         regidx = (bit & 0x60) >> 5;
7374                         bit &= 0x1f;
7375                         mc_filter[regidx] |= (1 << bit);
7376                 }
7377
7378                 tw32(MAC_HASH_REG_0, mc_filter[0]);
7379                 tw32(MAC_HASH_REG_1, mc_filter[1]);
7380                 tw32(MAC_HASH_REG_2, mc_filter[2]);
7381                 tw32(MAC_HASH_REG_3, mc_filter[3]);
7382         }
7383
7384         if (rx_mode != tp->rx_mode) {
7385                 tp->rx_mode = rx_mode;
7386                 tw32_f(MAC_RX_MODE, rx_mode);
7387                 udelay(10);
7388         }
7389 }
7390
7391 static void tg3_set_rx_mode(struct net_device *dev)
7392 {
7393         struct tg3 *tp = netdev_priv(dev);
7394
7395         if (!netif_running(dev))
7396                 return;
7397
7398         tg3_full_lock(tp, 0);
7399         __tg3_set_rx_mode(dev);
7400         tg3_full_unlock(tp);
7401 }
7402
7403 #define TG3_REGDUMP_LEN         (32 * 1024)
7404
7405 static int tg3_get_regs_len(struct net_device *dev)
7406 {
7407         return TG3_REGDUMP_LEN;
7408 }
7409
7410 static void tg3_get_regs(struct net_device *dev,
7411                 struct ethtool_regs *regs, void *_p)
7412 {
7413         u32 *p = _p;
7414         struct tg3 *tp = netdev_priv(dev);
7415         u8 *orig_p = _p;
7416         int i;
7417
7418         regs->version = 0;
7419
7420         memset(p, 0, TG3_REGDUMP_LEN);
7421
7422         if (tp->link_config.phy_is_low_power)
7423                 return;
7424
7425         tg3_full_lock(tp, 0);
7426
7427 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
7428 #define GET_REG32_LOOP(base,len)                \
7429 do {    p = (u32 *)(orig_p + (base));           \
7430         for (i = 0; i < len; i += 4)            \
7431                 __GET_REG32((base) + i);        \
7432 } while (0)
7433 #define GET_REG32_1(reg)                        \
7434 do {    p = (u32 *)(orig_p + (reg));            \
7435         __GET_REG32((reg));                     \
7436 } while (0)
7437
7438         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
7439         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
7440         GET_REG32_LOOP(MAC_MODE, 0x4f0);
7441         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
7442         GET_REG32_1(SNDDATAC_MODE);
7443         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
7444         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
7445         GET_REG32_1(SNDBDC_MODE);
7446         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
7447         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
7448         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
7449         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
7450         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
7451         GET_REG32_1(RCVDCC_MODE);
7452         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
7453         GET_REG32_LOOP(RCVCC_MODE, 0x14);
7454         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
7455         GET_REG32_1(MBFREE_MODE);
7456         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
7457         GET_REG32_LOOP(MEMARB_MODE, 0x10);
7458         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
7459         GET_REG32_LOOP(RDMAC_MODE, 0x08);
7460         GET_REG32_LOOP(WDMAC_MODE, 0x08);
7461         GET_REG32_1(RX_CPU_MODE);
7462         GET_REG32_1(RX_CPU_STATE);
7463         GET_REG32_1(RX_CPU_PGMCTR);
7464         GET_REG32_1(RX_CPU_HWBKPT);
7465         GET_REG32_1(TX_CPU_MODE);
7466         GET_REG32_1(TX_CPU_STATE);
7467         GET_REG32_1(TX_CPU_PGMCTR);
7468         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
7469         GET_REG32_LOOP(FTQ_RESET, 0x120);
7470         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
7471         GET_REG32_1(DMAC_MODE);
7472         GET_REG32_LOOP(GRC_MODE, 0x4c);
7473         if (tp->tg3_flags & TG3_FLAG_NVRAM)
7474                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
7475
7476 #undef __GET_REG32
7477 #undef GET_REG32_LOOP
7478 #undef GET_REG32_1
7479
7480         tg3_full_unlock(tp);
7481 }
7482
7483 static int tg3_get_eeprom_len(struct net_device *dev)
7484 {
7485         struct tg3 *tp = netdev_priv(dev);
7486
7487         return tp->nvram_size;
7488 }
7489
7490 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
7491 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
7492
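     /* ethtool EEPROM read.  NVRAM is only readable one aligned 32-bit word
      * at a time, so an arbitrary { offset, len } request is split into an
      * unaligned head, whole words, and an unaligned tail.  For example
      * offset=5, len=10 reads the words at 4, 8 and 12 and copies 3 + 4 + 3
      * bytes out of them.
      */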
7493 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7494 {
7495         struct tg3 *tp = netdev_priv(dev);
7496         int ret;
7497         u8  *pd;
7498         u32 i, offset, len, val, b_offset, b_count;
7499
7500         if (tp->link_config.phy_is_low_power)
7501                 return -EAGAIN;
7502
7503         offset = eeprom->offset;
7504         len = eeprom->len;
7505         eeprom->len = 0;
7506
7507         eeprom->magic = TG3_EEPROM_MAGIC;
7508
7509         if (offset & 3) {
7510                 /* adjustments to start on required 4 byte boundary */
7511                 b_offset = offset & 3;
7512                 b_count = 4 - b_offset;
7513                 if (b_count > len) {
7514                         /* i.e. offset=1 len=2 */
7515                         b_count = len;
7516                 }
7517                 ret = tg3_nvram_read(tp, offset-b_offset, &val);
7518                 if (ret)
7519                         return ret;
7520                 val = cpu_to_le32(val);
7521                 memcpy(data, ((char*)&val) + b_offset, b_count);
7522                 len -= b_count;
7523                 offset += b_count;
7524                 eeprom->len += b_count;
7525         }
7526
7527         /* read bytes up to the last 4 byte boundary */
7528         pd = &data[eeprom->len];
7529         for (i = 0; i < (len - (len & 3)); i += 4) {
7530                 ret = tg3_nvram_read(tp, offset + i, &val);
7531                 if (ret) {
7532                         eeprom->len += i;
7533                         return ret;
7534                 }
7535                 val = cpu_to_le32(val);
7536                 memcpy(pd + i, &val, 4);
7537         }
7538         eeprom->len += i;
7539
7540         if (len & 3) {
7541                 /* read last bytes not ending on 4 byte boundary */
7542                 pd = &data[eeprom->len];
7543                 b_count = len & 3;
7544                 b_offset = offset + len - b_count;
7545                 ret = tg3_nvram_read(tp, b_offset, &val);
7546                 if (ret)
7547                         return ret;
7548                 val = cpu_to_le32(val);
7549                 memcpy(pd, ((char*)&val), b_count);
7550                 eeprom->len += b_count;
7551         }
7552         return 0;
7553 }
7554
7555 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); 
7556
7557 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7558 {
7559         struct tg3 *tp = netdev_priv(dev);
7560         int ret;
7561         u32 offset, len, b_offset, odd_len, start, end;
7562         u8 *buf;
7563
7564         if (tp->link_config.phy_is_low_power)
7565                 return -EAGAIN;
7566
7567         if (eeprom->magic != TG3_EEPROM_MAGIC)
7568                 return -EINVAL;
7569
7570         offset = eeprom->offset;
7571         len = eeprom->len;
7572
7573         if ((b_offset = (offset & 3))) {
7574                 /* adjustments to start on required 4 byte boundary */
7575                 ret = tg3_nvram_read(tp, offset-b_offset, &start);
7576                 if (ret)
7577                         return ret;
7578                 start = cpu_to_le32(start);
7579                 len += b_offset;
7580                 offset &= ~3;
7581                 if (len < 4)
7582                         len = 4;
7583         }
7584
7585         odd_len = 0;
7586         if (len & 3) {
7587                 /* adjustments to end on required 4 byte boundary */
7588                 odd_len = 1;
7589                 len = (len + 3) & ~3;
7590                 ret = tg3_nvram_read(tp, offset+len-4, &end);
7591                 if (ret)
7592                         return ret;
7593                 end = cpu_to_le32(end);
7594         }
7595
7596         buf = data;
7597         if (b_offset || odd_len) {
7598                 buf = kmalloc(len, GFP_KERNEL);
7599                 if (!buf)
7600                         return -ENOMEM;
7601                 if (b_offset)
7602                         memcpy(buf, &start, 4);
7603                 if (odd_len)
7604                         memcpy(buf+len-4, &end, 4);
7605                 memcpy(buf + b_offset, data, eeprom->len);
7606         }
7607
7608         ret = tg3_nvram_write_block(tp, offset, len, buf);
7609
7610         if (buf != data)
7611                 kfree(buf);
7612
7613         return ret;
7614 }
7615
7616 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7617 {
7618         struct tg3 *tp = netdev_priv(dev);
7619   
7620         cmd->supported = (SUPPORTED_Autoneg);
7621
7622         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7623                 cmd->supported |= (SUPPORTED_1000baseT_Half |
7624                                    SUPPORTED_1000baseT_Full);
7625
7626         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
7627                 cmd->supported |= (SUPPORTED_100baseT_Half |
7628                                   SUPPORTED_100baseT_Full |
7629                                   SUPPORTED_10baseT_Half |
7630                                   SUPPORTED_10baseT_Full |
7631                                   SUPPORTED_MII);
7632         else
7633                 cmd->supported |= SUPPORTED_FIBRE;
7634   
7635         cmd->advertising = tp->link_config.advertising;
7636         if (netif_running(dev)) {
7637                 cmd->speed = tp->link_config.active_speed;
7638                 cmd->duplex = tp->link_config.active_duplex;
7639         }
7640         cmd->port = 0;
7641         cmd->phy_address = PHY_ADDR;
7642         cmd->transceiver = 0;
7643         cmd->autoneg = tp->link_config.autoneg;
7644         cmd->maxtxpkt = 0;
7645         cmd->maxrxpkt = 0;
7646         return 0;
7647 }
7648   
7649 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7650 {
7651         struct tg3 *tp = netdev_priv(dev);
7652   
7653         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) { 
7654                 /* These are the only valid advertisement bits allowed.  */
7655                 if (cmd->autoneg == AUTONEG_ENABLE &&
7656                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7657                                           ADVERTISED_1000baseT_Full |
7658                                           ADVERTISED_Autoneg |
7659                                           ADVERTISED_FIBRE)))
7660                         return -EINVAL;
7661                 /* Fiber can only do SPEED_1000.  */
7662                 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7663                          (cmd->speed != SPEED_1000))
7664                         return -EINVAL;
7665         /* Copper cannot force SPEED_1000.  */
7666         } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7667                    (cmd->speed == SPEED_1000))
7668                 return -EINVAL;
7669         else if ((cmd->speed == SPEED_1000) &&
7670                  (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7671                 return -EINVAL;
7672
7673         tg3_full_lock(tp, 0);
7674
7675         tp->link_config.autoneg = cmd->autoneg;
7676         if (cmd->autoneg == AUTONEG_ENABLE) {
7677                 tp->link_config.advertising = cmd->advertising;
7678                 tp->link_config.speed = SPEED_INVALID;
7679                 tp->link_config.duplex = DUPLEX_INVALID;
7680         } else {
7681                 tp->link_config.advertising = 0;
7682                 tp->link_config.speed = cmd->speed;
7683                 tp->link_config.duplex = cmd->duplex;
7684         }
7685   
7686         if (netif_running(dev))
7687                 tg3_setup_phy(tp, 1);
7688
7689         tg3_full_unlock(tp);
7690   
7691         return 0;
7692 }
7693   
7694 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7695 {
7696         struct tg3 *tp = netdev_priv(dev);
7697   
7698         strcpy(info->driver, DRV_MODULE_NAME);
7699         strcpy(info->version, DRV_MODULE_VERSION);
7700         strcpy(info->fw_version, tp->fw_ver);
7701         strcpy(info->bus_info, pci_name(tp->pdev));
7702 }
7703   
7704 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7705 {
7706         struct tg3 *tp = netdev_priv(dev);
7707   
7708         wol->supported = WAKE_MAGIC;
7709         wol->wolopts = 0;
7710         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7711                 wol->wolopts = WAKE_MAGIC;
7712         memset(&wol->sopass, 0, sizeof(wol->sopass));
7713 }
7714   
7715 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7716 {
7717         struct tg3 *tp = netdev_priv(dev);
7718   
7719         if (wol->wolopts & ~WAKE_MAGIC)
7720                 return -EINVAL;
7721         if ((wol->wolopts & WAKE_MAGIC) &&
7722             tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
7723             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
7724                 return -EINVAL;
7725   
7726         spin_lock_bh(&tp->lock);
7727         if (wol->wolopts & WAKE_MAGIC)
7728                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
7729         else
7730                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
7731         spin_unlock_bh(&tp->lock);
7732   
7733         return 0;
7734 }
7735   
7736 static u32 tg3_get_msglevel(struct net_device *dev)
7737 {
7738         struct tg3 *tp = netdev_priv(dev);
7739         return tp->msg_enable;
7740 }
7741   
7742 static void tg3_set_msglevel(struct net_device *dev, u32 value)
7743 {
7744         struct tg3 *tp = netdev_priv(dev);
7745         tp->msg_enable = value;
7746 }
7747   
7748 #if TG3_TSO_SUPPORT != 0
7749 static int tg3_set_tso(struct net_device *dev, u32 value)
7750 {
7751         struct tg3 *tp = netdev_priv(dev);
7752
7753         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7754                 if (value)
7755                         return -EINVAL;
7756                 return 0;
7757         }
7758         return ethtool_op_set_tso(dev, value);
7759 }
7760 #endif
7761   
7762 static int tg3_nway_reset(struct net_device *dev)
7763 {
7764         struct tg3 *tp = netdev_priv(dev);
7765         u32 bmcr;
7766         int r;
7767   
7768         if (!netif_running(dev))
7769                 return -EAGAIN;
7770
7771         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7772                 return -EINVAL;
7773
7774         spin_lock_bh(&tp->lock);
7775         r = -EINVAL;
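             /* The result of the first tg3_readphy() is discarded; the
              * second read is the one checked to confirm autonegotiation is
              * enabled (or parallel detect is active) before restarting it.
              */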
7776         tg3_readphy(tp, MII_BMCR, &bmcr);
7777         if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
7778             ((bmcr & BMCR_ANENABLE) ||
7779              (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
7780                 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
7781                                            BMCR_ANENABLE);
7782                 r = 0;
7783         }
7784         spin_unlock_bh(&tp->lock);
7785   
7786         return r;
7787 }
7788   
7789 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7790 {
7791         struct tg3 *tp = netdev_priv(dev);
7792   
7793         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
7794         ering->rx_mini_max_pending = 0;
7795         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7796                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
7797         else
7798                 ering->rx_jumbo_max_pending = 0;
7799
7800         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
7801
7802         ering->rx_pending = tp->rx_pending;
7803         ering->rx_mini_pending = 0;
7804         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7805                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
7806         else
7807                 ering->rx_jumbo_pending = 0;
7808
7809         ering->tx_pending = tp->tx_pending;
7810 }
7811   
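     /* Changing ring sizes requires a full chip halt and re-init when the
      * interface is running.  The requested values are validated against the
      * hardware ring limits, and the standard RX ring is clamped to 63
      * entries on MAX_RXPEND_64 chips.
      */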
7812 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7813 {
7814         struct tg3 *tp = netdev_priv(dev);
7815         int irq_sync = 0;
7816   
7817         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
7818             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
7819             (ering->tx_pending > TG3_TX_RING_SIZE - 1))
7820                 return -EINVAL;
7821   
7822         if (netif_running(dev)) {
7823                 tg3_netif_stop(tp);
7824                 irq_sync = 1;
7825         }
7826
7827         tg3_full_lock(tp, irq_sync);
7828   
7829         tp->rx_pending = ering->rx_pending;
7830
7831         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
7832             tp->rx_pending > 63)
7833                 tp->rx_pending = 63;
7834         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
7835         tp->tx_pending = ering->tx_pending;
7836
7837         if (netif_running(dev)) {
7838                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7839                 tg3_init_hw(tp);
7840                 tg3_netif_start(tp);
7841         }
7842
7843         tg3_full_unlock(tp);
7844   
7845         return 0;
7846 }
7847   
7848 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7849 {
7850         struct tg3 *tp = netdev_priv(dev);
7851   
7852         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
7853         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
7854         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
7855 }
7856   
7857 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7858 {
7859         struct tg3 *tp = netdev_priv(dev);
7860         int irq_sync = 0;
7861   
7862         if (netif_running(dev)) {
7863                 tg3_netif_stop(tp);
7864                 irq_sync = 1;
7865         }
7866
7867         tg3_full_lock(tp, irq_sync);
7868
7869         if (epause->autoneg)
7870                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
7871         else
7872                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
7873         if (epause->rx_pause)
7874                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
7875         else
7876                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
7877         if (epause->tx_pause)
7878                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
7879         else
7880                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
7881
7882         if (netif_running(dev)) {
7883                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7884                 tg3_init_hw(tp);
7885                 tg3_netif_start(tp);
7886         }
7887
7888         tg3_full_unlock(tp);
7889   
7890         return 0;
7891 }
7892   
7893 static u32 tg3_get_rx_csum(struct net_device *dev)
7894 {
7895         struct tg3 *tp = netdev_priv(dev);
7896         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
7897 }
7898   
7899 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
7900 {
7901         struct tg3 *tp = netdev_priv(dev);
7902   
7903         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7904                 if (data != 0)
7905                         return -EINVAL;
7906                 return 0;
7907         }
7908   
7909         spin_lock_bh(&tp->lock);
7910         if (data)
7911                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
7912         else
7913                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
7914         spin_unlock_bh(&tp->lock);
7915   
7916         return 0;
7917 }
7918   
7919 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
7920 {
7921         struct tg3 *tp = netdev_priv(dev);
7922   
7923         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7924                 if (data != 0)
7925                         return -EINVAL;
7926                 return 0;
7927         }
7928   
7929         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7930             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
7931                 ethtool_op_set_tx_hw_csum(dev, data);
7932         else
7933                 ethtool_op_set_tx_csum(dev, data);
7934
7935         return 0;
7936 }
7937
7938 static int tg3_get_stats_count (struct net_device *dev)
7939 {
7940         return TG3_NUM_STATS;
7941 }
7942
7943 static int tg3_get_test_count (struct net_device *dev)
7944 {
7945         return TG3_NUM_TEST;
7946 }
7947
7948 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
7949 {
7950         switch (stringset) {
7951         case ETH_SS_STATS:
7952                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
7953                 break;
7954         case ETH_SS_TEST:
7955                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
7956                 break;
7957         default:
7958                 WARN_ON(1);     /* we need a WARN() */
7959                 break;
7960         }
7961 }
7962
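     /* Blink the LEDs to physically identify the adapter: alternate between
      * forcing all speed/traffic LEDs on and link-override only, switching
      * every 500ms for 'data' seconds (2 seconds if unspecified), then
      * restore the saved LED control value.
      */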
7963 static int tg3_phys_id(struct net_device *dev, u32 data)
7964 {
7965         struct tg3 *tp = netdev_priv(dev);
7966         int i;
7967
7968         if (!netif_running(tp->dev))
7969                 return -EAGAIN;
7970
7971         if (data == 0)
7972                 data = 2;
7973
7974         for (i = 0; i < (data * 2); i++) {
7975                 if ((i % 2) == 0)
7976                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
7977                                            LED_CTRL_1000MBPS_ON |
7978                                            LED_CTRL_100MBPS_ON |
7979                                            LED_CTRL_10MBPS_ON |
7980                                            LED_CTRL_TRAFFIC_OVERRIDE |
7981                                            LED_CTRL_TRAFFIC_BLINK |
7982                                            LED_CTRL_TRAFFIC_LED);
7983         
7984                 else
7985                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
7986                                            LED_CTRL_TRAFFIC_OVERRIDE);
7987
7988                 if (msleep_interruptible(500))
7989                         break;
7990         }
7991         tw32(MAC_LED_CTRL, tp->led_ctrl);
7992         return 0;
7993 }
7994
7995 static void tg3_get_ethtool_stats (struct net_device *dev,
7996                                    struct ethtool_stats *estats, u64 *tmp_stats)
7997 {
7998         struct tg3 *tp = netdev_priv(dev);
7999         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
8000 }
8001
8002 #define NVRAM_TEST_SIZE 0x100
8003 #define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14
8004
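     /* NVRAM self test: read the image into a temporary buffer, then verify
      * either the 8-bit additive checksum (selfboot images) or, for legacy
      * images, the CRC of the bootstrap region (stored at offset 0x10) and
      * of the manufacturing block (stored at offset 0xfc).
      */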
8005 static int tg3_test_nvram(struct tg3 *tp)
8006 {
8007         u32 *buf, csum, magic;
8008         int i, j, err = 0, size;
8009
8010         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
8011                 return -EIO;
8012
8013         if (magic == TG3_EEPROM_MAGIC)
8014                 size = NVRAM_TEST_SIZE;
8015         else if ((magic & 0xff000000) == 0xa5000000) {
8016                 if ((magic & 0xe00000) == 0x200000)
8017                         size = NVRAM_SELFBOOT_FORMAT1_SIZE;
8018                 else
8019                         return 0;
8020         } else
8021                 return -EIO;
8022
8023         buf = kmalloc(size, GFP_KERNEL);
8024         if (buf == NULL)
8025                 return -ENOMEM;
8026
8027         err = -EIO;
8028         for (i = 0, j = 0; i < size; i += 4, j++) {
8029                 u32 val;
8030
8031                 if ((err = tg3_nvram_read(tp, i, &val)) != 0)
8032                         break;
8033                 buf[j] = cpu_to_le32(val);
8034         }
8035         if (i < size)
8036                 goto out;
8037
8038         /* Selfboot format */
8039         if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC) {
8040                 u8 *buf8 = (u8 *) buf, csum8 = 0;
8041
8042                 for (i = 0; i < size; i++)
8043                         csum8 += buf8[i];
8044
8045                 if (csum8 == 0)
8046                         return 0;
8047                 return -EIO;
8048         }
8049
8050         /* Bootstrap checksum at offset 0x10 */
8051         csum = calc_crc((unsigned char *) buf, 0x10);
8052         if (csum != cpu_to_le32(buf[0x10/4]))
8053                 goto out;
8054
8055         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
8056         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
8057         if (csum != cpu_to_le32(buf[0xfc/4]))
8058                 goto out;
8059
8060         err = 0;
8061
8062 out:
8063         kfree(buf);
8064         return err;
8065 }
8066
8067 #define TG3_SERDES_TIMEOUT_SEC  2
8068 #define TG3_COPPER_TIMEOUT_SEC  6
8069
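     /* Wait up to TG3_SERDES_TIMEOUT_SEC (fiber) or TG3_COPPER_TIMEOUT_SEC
      * (copper) seconds for the carrier to come up, polling once a second.
      */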
8070 static int tg3_test_link(struct tg3 *tp)
8071 {
8072         int i, max;
8073
8074         if (!netif_running(tp->dev))
8075                 return -ENODEV;
8076
8077         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
8078                 max = TG3_SERDES_TIMEOUT_SEC;
8079         else
8080                 max = TG3_COPPER_TIMEOUT_SEC;
8081
8082         for (i = 0; i < max; i++) {
8083                 if (netif_carrier_ok(tp->dev))
8084                         return 0;
8085
8086                 if (msleep_interruptible(1000))
8087                         break;
8088         }
8089
8090         return -EIO;
8091 }
8092
8093 /* Only test the commonly used registers */
8094 static int tg3_test_registers(struct tg3 *tp)
8095 {
8096         int i, is_5705;
8097         u32 offset, read_mask, write_mask, val, save_val, read_val;
8098         static struct {
8099                 u16 offset;
8100                 u16 flags;
8101 #define TG3_FL_5705     0x1
8102 #define TG3_FL_NOT_5705 0x2
8103 #define TG3_FL_NOT_5788 0x4
8104                 u32 read_mask;
8105                 u32 write_mask;
8106         } reg_tbl[] = {
8107                 /* MAC Control Registers */
8108                 { MAC_MODE, TG3_FL_NOT_5705,
8109                         0x00000000, 0x00ef6f8c },
8110                 { MAC_MODE, TG3_FL_5705,
8111                         0x00000000, 0x01ef6b8c },
8112                 { MAC_STATUS, TG3_FL_NOT_5705,
8113                         0x03800107, 0x00000000 },
8114                 { MAC_STATUS, TG3_FL_5705,
8115                         0x03800100, 0x00000000 },
8116                 { MAC_ADDR_0_HIGH, 0x0000,
8117                         0x00000000, 0x0000ffff },
8118                 { MAC_ADDR_0_LOW, 0x0000,
8119                         0x00000000, 0xffffffff },
8120                 { MAC_RX_MTU_SIZE, 0x0000,
8121                         0x00000000, 0x0000ffff },
8122                 { MAC_TX_MODE, 0x0000,
8123                         0x00000000, 0x00000070 },
8124                 { MAC_TX_LENGTHS, 0x0000,
8125                         0x00000000, 0x00003fff },
8126                 { MAC_RX_MODE, TG3_FL_NOT_5705,
8127                         0x00000000, 0x000007fc },
8128                 { MAC_RX_MODE, TG3_FL_5705,
8129                         0x00000000, 0x000007dc },
8130                 { MAC_HASH_REG_0, 0x0000,
8131                         0x00000000, 0xffffffff },
8132                 { MAC_HASH_REG_1, 0x0000,
8133                         0x00000000, 0xffffffff },
8134                 { MAC_HASH_REG_2, 0x0000,
8135                         0x00000000, 0xffffffff },
8136                 { MAC_HASH_REG_3, 0x0000,
8137                         0x00000000, 0xffffffff },
8138
8139                 /* Receive Data and Receive BD Initiator Control Registers. */
8140                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
8141                         0x00000000, 0xffffffff },
8142                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
8143                         0x00000000, 0xffffffff },
8144                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
8145                         0x00000000, 0x00000003 },
8146                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
8147                         0x00000000, 0xffffffff },
8148                 { RCVDBDI_STD_BD+0, 0x0000,
8149                         0x00000000, 0xffffffff },
8150                 { RCVDBDI_STD_BD+4, 0x0000,
8151                         0x00000000, 0xffffffff },
8152                 { RCVDBDI_STD_BD+8, 0x0000,
8153                         0x00000000, 0xffff0002 },
8154                 { RCVDBDI_STD_BD+0xc, 0x0000,
8155                         0x00000000, 0xffffffff },
8156         
8157                 /* Receive BD Initiator Control Registers. */
8158                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
8159                         0x00000000, 0xffffffff },
8160                 { RCVBDI_STD_THRESH, TG3_FL_5705,
8161                         0x00000000, 0x000003ff },
8162                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
8163                         0x00000000, 0xffffffff },
8164         
8165                 /* Host Coalescing Control Registers. */
8166                 { HOSTCC_MODE, TG3_FL_NOT_5705,
8167                         0x00000000, 0x00000004 },
8168                 { HOSTCC_MODE, TG3_FL_5705,
8169                         0x00000000, 0x000000f6 },
8170                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
8171                         0x00000000, 0xffffffff },
8172                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
8173                         0x00000000, 0x000003ff },
8174                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
8175                         0x00000000, 0xffffffff },
8176                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
8177                         0x00000000, 0x000003ff },
8178                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
8179                         0x00000000, 0xffffffff },
8180                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8181                         0x00000000, 0x000000ff },
8182                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
8183                         0x00000000, 0xffffffff },
8184                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8185                         0x00000000, 0x000000ff },
8186                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
8187                         0x00000000, 0xffffffff },
8188                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
8189                         0x00000000, 0xffffffff },
8190                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8191                         0x00000000, 0xffffffff },
8192                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8193                         0x00000000, 0x000000ff },
8194                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8195                         0x00000000, 0xffffffff },
8196                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8197                         0x00000000, 0x000000ff },
8198                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
8199                         0x00000000, 0xffffffff },
8200                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
8201                         0x00000000, 0xffffffff },
8202                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
8203                         0x00000000, 0xffffffff },
8204                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
8205                         0x00000000, 0xffffffff },
8206                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
8207                         0x00000000, 0xffffffff },
8208                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
8209                         0xffffffff, 0x00000000 },
8210                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
8211                         0xffffffff, 0x00000000 },
8212
8213                 /* Buffer Manager Control Registers. */
8214                 { BUFMGR_MB_POOL_ADDR, 0x0000,
8215                         0x00000000, 0x007fff80 },
8216                 { BUFMGR_MB_POOL_SIZE, 0x0000,
8217                         0x00000000, 0x007fffff },
8218                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
8219                         0x00000000, 0x0000003f },
8220                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
8221                         0x00000000, 0x000001ff },
8222                 { BUFMGR_MB_HIGH_WATER, 0x0000,
8223                         0x00000000, 0x000001ff },
8224                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
8225                         0xffffffff, 0x00000000 },
8226                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
8227                         0xffffffff, 0x00000000 },
8228         
8229                 /* Mailbox Registers */
8230                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
8231                         0x00000000, 0x000001ff },
8232                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
8233                         0x00000000, 0x000001ff },
8234                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
8235                         0x00000000, 0x000007ff },
8236                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
8237                         0x00000000, 0x000001ff },
8238
8239                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
8240         };
8241
8242         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8243                 is_5705 = 1;
8244         else
8245                 is_5705 = 0;
8246
8247         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
8248                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
8249                         continue;
8250
8251                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
8252                         continue;
8253
8254                 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
8255                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
8256                         continue;
8257
8258                 offset = (u32) reg_tbl[i].offset;
8259                 read_mask = reg_tbl[i].read_mask;
8260                 write_mask = reg_tbl[i].write_mask;
8261
8262                 /* Save the original register content */
8263                 save_val = tr32(offset);
8264
8265                 /* Determine the read-only value. */
8266                 read_val = save_val & read_mask;
8267
8268                 /* Write zero to the register, then make sure the read-only bits
8269                  * are not changed and the read/write bits are all zeros.
8270                  */
8271                 tw32(offset, 0);
8272
8273                 val = tr32(offset);
8274
8275                 /* Test the read-only and read/write bits. */
8276                 if (((val & read_mask) != read_val) || (val & write_mask))
8277                         goto out;
8278
8279                 /* Write ones to all the bits defined by RdMask and WrMask, then
8280                  * make sure the read-only bits are not changed and the
8281                  * read/write bits are all ones.
8282                  */
8283                 tw32(offset, read_mask | write_mask);
8284
8285                 val = tr32(offset);
8286
8287                 /* Test the read-only bits. */
8288                 if ((val & read_mask) != read_val)
8289                         goto out;
8290
8291                 /* Test the read/write bits. */
8292                 if ((val & write_mask) != write_mask)
8293                         goto out;
8294
8295                 tw32(offset, save_val);
8296         }
8297
8298         return 0;
8299
8300 out:
8301         printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
8302         tw32(offset, save_val);
8303         return -EIO;
8304 }
8305
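     /* Write each test pattern to every 32-bit word of the given internal
      * memory window and read it back, returning -EIO on the first mismatch.
      */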
8306 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
8307 {
8308         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
8309         int i;
8310         u32 j;
8311
8312         for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) {
8313                 for (j = 0; j < len; j += 4) {
8314                         u32 val;
8315
8316                         tg3_write_mem(tp, offset + j, test_pattern[i]);
8317                         tg3_read_mem(tp, offset + j, &val);
8318                         if (val != test_pattern[i])
8319                                 return -EIO;
8320                 }
8321         }
8322         return 0;
8323 }
8324
8325 static int tg3_test_memory(struct tg3 *tp)
8326 {
8327         static struct mem_entry {
8328                 u32 offset;
8329                 u32 len;
8330         } mem_tbl_570x[] = {
8331                 { 0x00000000, 0x00b50},
8332                 { 0x00002000, 0x1c000},
8333                 { 0xffffffff, 0x00000}
8334         }, mem_tbl_5705[] = {
8335                 { 0x00000100, 0x0000c},
8336                 { 0x00000200, 0x00008},
8337                 { 0x00004000, 0x00800},
8338                 { 0x00006000, 0x01000},
8339                 { 0x00008000, 0x02000},
8340                 { 0x00010000, 0x0e000},
8341                 { 0xffffffff, 0x00000}
8342         }, mem_tbl_5755[] = {
8343                 { 0x00000200, 0x00008},
8344                 { 0x00004000, 0x00800},
8345                 { 0x00006000, 0x00800},
8346                 { 0x00008000, 0x02000},
8347                 { 0x00010000, 0x0c000},
8348                 { 0xffffffff, 0x00000}
8349         };
8350         struct mem_entry *mem_tbl;
8351         int err = 0;
8352         int i;
8353
8354         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
8355                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8356                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8357                         mem_tbl = mem_tbl_5755;
8358                 else
8359                         mem_tbl = mem_tbl_5705;
8360         } else
8361                 mem_tbl = mem_tbl_570x;
8362
8363         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
8364                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
8365                     mem_tbl[i].len)) != 0)
8366                         break;
8367         }
8368         
8369         return err;
8370 }
8371
8372 #define TG3_MAC_LOOPBACK        0
8373 #define TG3_PHY_LOOPBACK        1
8374
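     /* Send a single 1514-byte frame with an incrementing data pattern
      * through either internal MAC loopback or PHY loopback, then verify
      * that the frame comes back intact on the standard receive ring.
      */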
8375 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
8376 {
8377         u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
8378         u32 desc_idx;
8379         struct sk_buff *skb, *rx_skb;
8380         u8 *tx_data;
8381         dma_addr_t map;
8382         int num_pkts, tx_len, rx_len, i, err;
8383         struct tg3_rx_buffer_desc *desc;
8384
8385         if (loopback_mode == TG3_MAC_LOOPBACK) {
8386                 /* HW errata - mac loopback fails in some cases on 5780.
8387                  * Normal traffic and PHY loopback are not affected by
8388                  * errata.
8389                  */
8390                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
8391                         return 0;
8392
8393                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8394                            MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
8395                            MAC_MODE_PORT_MODE_GMII;
8396                 tw32(MAC_MODE, mac_mode);
8397         } else if (loopback_mode == TG3_PHY_LOOPBACK) {
8398                 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
8399                                            BMCR_SPEED1000);
8400                 udelay(40);
8401                 /* reset to prevent losing 1st rx packet intermittently */
8402                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
8403                         tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8404                         udelay(10);
8405                         tw32_f(MAC_RX_MODE, tp->rx_mode);
8406                 }
8407                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8408                            MAC_MODE_LINK_POLARITY | MAC_MODE_PORT_MODE_GMII;
8409                 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
8410                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
8411                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
8412                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8413                 }
8414                 tw32(MAC_MODE, mac_mode);
8415         }
8416         else
8417                 return -EINVAL;
8418
8419         err = -EIO;
8420
8421         tx_len = 1514;
8422         skb = dev_alloc_skb(tx_len);
             if (!skb)
                     return -ENOMEM;
8423         tx_data = skb_put(skb, tx_len);
8424         memcpy(tx_data, tp->dev->dev_addr, 6);
8425         memset(tx_data + 6, 0x0, 8);
8426
8427         tw32(MAC_RX_MTU_SIZE, tx_len + 4);
8428
8429         for (i = 14; i < tx_len; i++)
8430                 tx_data[i] = (u8) (i & 0xff);
8431
8432         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
8433
8434         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8435              HOSTCC_MODE_NOW);
8436
8437         udelay(10);
8438
8439         rx_start_idx = tp->hw_status->idx[0].rx_producer;
8440
8441         num_pkts = 0;
8442
8443         tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
8444
8445         tp->tx_prod++;
8446         num_pkts++;
8447
8448         tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
8449                      tp->tx_prod);
8450         tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
8451
8452         udelay(10);
8453
8454         for (i = 0; i < 10; i++) {
8455                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8456                        HOSTCC_MODE_NOW);
8457
8458                 udelay(10);
8459
8460                 tx_idx = tp->hw_status->idx[0].tx_consumer;
8461                 rx_idx = tp->hw_status->idx[0].rx_producer;
8462                 if ((tx_idx == tp->tx_prod) &&
8463                     (rx_idx == (rx_start_idx + num_pkts)))
8464                         break;
8465         }
8466
8467         pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
8468         dev_kfree_skb(skb);
8469
8470         if (tx_idx != tp->tx_prod)
8471                 goto out;
8472
8473         if (rx_idx != rx_start_idx + num_pkts)
8474                 goto out;
8475
8476         desc = &tp->rx_rcb[rx_start_idx];
8477         desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
8478         opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
8479         if (opaque_key != RXD_OPAQUE_RING_STD)
8480                 goto out;
8481
8482         if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
8483             (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
8484                 goto out;
8485
8486         rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
8487         if (rx_len != tx_len)
8488                 goto out;
8489
8490         rx_skb = tp->rx_std_buffers[desc_idx].skb;
8491
8492         map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
8493         pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
8494
8495         for (i = 14; i < tx_len; i++) {
8496                 if (*(rx_skb->data + i) != (u8) (i & 0xff))
8497                         goto out;
8498         }
8499         err = 0;
8500         
8501         /* tg3_free_rings will unmap and free the rx_skb */
8502 out:
8503         return err;
8504 }
8505
8506 #define TG3_MAC_LOOPBACK_FAILED         1
8507 #define TG3_PHY_LOOPBACK_FAILED         2
8508 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
8509                                          TG3_PHY_LOOPBACK_FAILED)
8510
8511 static int tg3_test_loopback(struct tg3 *tp)
8512 {
8513         int err = 0;
8514
8515         if (!netif_running(tp->dev))
8516                 return TG3_LOOPBACK_FAILED;
8517
8518         tg3_reset_hw(tp);
8519
8520         if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
8521                 err |= TG3_MAC_LOOPBACK_FAILED;
8522         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8523                 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
8524                         err |= TG3_PHY_LOOPBACK_FAILED;
8525         }
8526
8527         return err;
8528 }
8529
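     /* ethtool self-test entry point.  The NVRAM and link tests always run;
      * with ETH_TEST_FL_OFFLINE the chip is halted for the register, memory
      * and loopback tests, the interrupt test runs with the lock dropped,
      * and the hardware is re-initialized if the interface was up.
      */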
8530 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
8531                           u64 *data)
8532 {
8533         struct tg3 *tp = netdev_priv(dev);
8534
8535         if (tp->link_config.phy_is_low_power)
8536                 tg3_set_power_state(tp, PCI_D0);
8537
8538         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
8539
8540         if (tg3_test_nvram(tp) != 0) {
8541                 etest->flags |= ETH_TEST_FL_FAILED;
8542                 data[0] = 1;
8543         }
8544         if (tg3_test_link(tp) != 0) {
8545                 etest->flags |= ETH_TEST_FL_FAILED;
8546                 data[1] = 1;
8547         }
8548         if (etest->flags & ETH_TEST_FL_OFFLINE) {
8549                 int err, irq_sync = 0;
8550
8551                 if (netif_running(dev)) {
8552                         tg3_netif_stop(tp);
8553                         irq_sync = 1;
8554                 }
8555
8556                 tg3_full_lock(tp, irq_sync);
8557
8558                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
8559                 err = tg3_nvram_lock(tp);
8560                 tg3_halt_cpu(tp, RX_CPU_BASE);
8561                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8562                         tg3_halt_cpu(tp, TX_CPU_BASE);
8563                 if (!err)
8564                         tg3_nvram_unlock(tp);
8565
8566                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
8567                         tg3_phy_reset(tp);
8568
8569                 if (tg3_test_registers(tp) != 0) {
8570                         etest->flags |= ETH_TEST_FL_FAILED;
8571                         data[2] = 1;
8572                 }
8573                 if (tg3_test_memory(tp) != 0) {
8574                         etest->flags |= ETH_TEST_FL_FAILED;
8575                         data[3] = 1;
8576                 }
8577                 if ((data[4] = tg3_test_loopback(tp)) != 0)
8578                         etest->flags |= ETH_TEST_FL_FAILED;
8579
8580                 tg3_full_unlock(tp);
8581
8582                 if (tg3_test_interrupt(tp) != 0) {
8583                         etest->flags |= ETH_TEST_FL_FAILED;
8584                         data[5] = 1;
8585                 }
8586
8587                 tg3_full_lock(tp, 0);
8588
8589                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8590                 if (netif_running(dev)) {
8591                         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8592                         tg3_init_hw(tp);
8593                         tg3_netif_start(tp);
8594                 }
8595
8596                 tg3_full_unlock(tp);
8597         }
8598         if (tp->link_config.phy_is_low_power)
8599                 tg3_set_power_state(tp, PCI_D3hot);
8600
8601 }
8602
8603 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8604 {
8605         struct mii_ioctl_data *data = if_mii(ifr);
8606         struct tg3 *tp = netdev_priv(dev);
8607         int err;
8608
8609         switch(cmd) {
8610         case SIOCGMIIPHY:
8611                 data->phy_id = PHY_ADDR;
8612
8613                 /* fallthru */
8614         case SIOCGMIIREG: {
8615                 u32 mii_regval;
8616
8617                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8618                         break;                  /* We have no PHY */
8619
8620                 if (tp->link_config.phy_is_low_power)
8621                         return -EAGAIN;
8622
8623                 spin_lock_bh(&tp->lock);
8624                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
8625                 spin_unlock_bh(&tp->lock);
8626
8627                 data->val_out = mii_regval;
8628
8629                 return err;
8630         }
8631
8632         case SIOCSMIIREG:
8633                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8634                         break;                  /* We have no PHY */
8635
8636                 if (!capable(CAP_NET_ADMIN))
8637                         return -EPERM;
8638
8639                 if (tp->link_config.phy_is_low_power)
8640                         return -EAGAIN;
8641
8642                 spin_lock_bh(&tp->lock);
8643                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
8644                 spin_unlock_bh(&tp->lock);
8645
8646                 return err;
8647
8648         default:
8649                 /* do nothing */
8650                 break;
8651         }
8652         return -EOPNOTSUPP;
8653 }
8654
8655 #if TG3_VLAN_TAG_USED
8656 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
8657 {
8658         struct tg3 *tp = netdev_priv(dev);
8659
8660         tg3_full_lock(tp, 0);
8661
8662         tp->vlgrp = grp;
8663
8664         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
8665         __tg3_set_rx_mode(dev);
8666
8667         tg3_full_unlock(tp);
8668 }
8669
8670 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
8671 {
8672         struct tg3 *tp = netdev_priv(dev);
8673
8674         tg3_full_lock(tp, 0);
8675         if (tp->vlgrp)
8676                 tp->vlgrp->vlan_devices[vid] = NULL;
8677         tg3_full_unlock(tp);
8678 }
8679 #endif
8680
8681 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8682 {
8683         struct tg3 *tp = netdev_priv(dev);
8684
8685         memcpy(ec, &tp->coal, sizeof(*ec));
8686         return 0;
8687 }
8688
8689 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8690 {
8691         struct tg3 *tp = netdev_priv(dev);
8692         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
8693         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
8694
8695         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8696                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
8697                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
8698                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
8699                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
8700         }
8701
8702         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
8703             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
8704             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
8705             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
8706             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
8707             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
8708             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
8709             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
8710             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
8711             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
8712                 return -EINVAL;
8713
8714         /* No rx interrupts will be generated if both are zero */
8715         if ((ec->rx_coalesce_usecs == 0) &&
8716             (ec->rx_max_coalesced_frames == 0))
8717                 return -EINVAL;
8718
8719         /* No tx interrupts will be generated if both are zero */
8720         if ((ec->tx_coalesce_usecs == 0) &&
8721             (ec->tx_max_coalesced_frames == 0))
8722                 return -EINVAL;
8723
8724         /* Only copy relevant parameters, ignore all others. */
8725         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
8726         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
8727         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
8728         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
8729         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
8730         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
8731         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
8732         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
8733         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
8734
8735         if (netif_running(dev)) {
8736                 tg3_full_lock(tp, 0);
8737                 __tg3_set_coalesce(tp, &tp->coal);
8738                 tg3_full_unlock(tp);
8739         }
8740         return 0;
8741 }
8742
8743 static struct ethtool_ops tg3_ethtool_ops = {
8744         .get_settings           = tg3_get_settings,
8745         .set_settings           = tg3_set_settings,
8746         .get_drvinfo            = tg3_get_drvinfo,
8747         .get_regs_len           = tg3_get_regs_len,
8748         .get_regs               = tg3_get_regs,
8749         .get_wol                = tg3_get_wol,
8750         .set_wol                = tg3_set_wol,
8751         .get_msglevel           = tg3_get_msglevel,
8752         .set_msglevel           = tg3_set_msglevel,
8753         .nway_reset             = tg3_nway_reset,
8754         .get_link               = ethtool_op_get_link,
8755         .get_eeprom_len         = tg3_get_eeprom_len,
8756         .get_eeprom             = tg3_get_eeprom,
8757         .set_eeprom             = tg3_set_eeprom,
8758         .get_ringparam          = tg3_get_ringparam,
8759         .set_ringparam          = tg3_set_ringparam,
8760         .get_pauseparam         = tg3_get_pauseparam,
8761         .set_pauseparam         = tg3_set_pauseparam,
8762         .get_rx_csum            = tg3_get_rx_csum,
8763         .set_rx_csum            = tg3_set_rx_csum,
8764         .get_tx_csum            = ethtool_op_get_tx_csum,
8765         .set_tx_csum            = tg3_set_tx_csum,
8766         .get_sg                 = ethtool_op_get_sg,
8767         .set_sg                 = ethtool_op_set_sg,
8768 #if TG3_TSO_SUPPORT != 0
8769         .get_tso                = ethtool_op_get_tso,
8770         .set_tso                = tg3_set_tso,
8771 #endif
8772         .self_test_count        = tg3_get_test_count,
8773         .self_test              = tg3_self_test,
8774         .get_strings            = tg3_get_strings,
8775         .phys_id                = tg3_phys_id,
8776         .get_stats_count        = tg3_get_stats_count,
8777         .get_ethtool_stats      = tg3_get_ethtool_stats,
8778         .get_coalesce           = tg3_get_coalesce,
8779         .set_coalesce           = tg3_set_coalesce,
8780         .get_perm_addr          = ethtool_op_get_perm_addr,
8781 };
8782
8783 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
8784 {
8785         u32 cursize, val, magic;
8786
8787         tp->nvram_size = EEPROM_CHIP_SIZE;
8788
8789         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
8790                 return;
8791
8792         if ((magic != TG3_EEPROM_MAGIC) && ((magic & 0xff000000) != 0xa5000000))
8793                 return;
8794
8795         /*
8796          * Size the chip by reading offsets at increasing powers of two.
8797          * When we encounter our validation signature, we know the addressing
8798          * has wrapped around, and thus have our chip size.
8799          */
8800         cursize = 0x10;
8801
8802         while (cursize < tp->nvram_size) {
8803                 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
8804                         return;
8805
8806                 if (val == magic)
8807                         break;
8808
8809                 cursize <<= 1;
8810         }
8811
8812         tp->nvram_size = cursize;
8813 }
8814                 
8815 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
8816 {
8817         u32 val;
8818
8819         if (tg3_nvram_read_swab(tp, 0, &val) != 0)
8820                 return;
8821
8822         /* Selfboot format */
8823         if (val != TG3_EEPROM_MAGIC) {
8824                 tg3_get_eeprom_size(tp);
8825                 return;
8826         }
8827
8828         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
8829                 if (val != 0) {
8830                         tp->nvram_size = (val >> 16) * 1024;
8831                         return;
8832                 }
8833         }
8834         tp->nvram_size = 0x20000;
8835 }
8836
8837 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
8838 {
8839         u32 nvcfg1;
8840
8841         nvcfg1 = tr32(NVRAM_CFG1);
8842         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
8843                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
8844         }
8845         else {
8846                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8847                 tw32(NVRAM_CFG1, nvcfg1);
8848         }
8849
8850         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
8851             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
8852                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
8853                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
8854                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8855                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8856                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8857                                 break;
8858                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
8859                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8860                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
8861                                 break;
8862                         case FLASH_VENDOR_ATMEL_EEPROM:
8863                                 tp->nvram_jedecnum = JEDEC_ATMEL;
8864                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8865                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8866                                 break;
8867                         case FLASH_VENDOR_ST:
8868                                 tp->nvram_jedecnum = JEDEC_ST;
8869                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
8870                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8871                                 break;
8872                         case FLASH_VENDOR_SAIFUN:
8873                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
8874                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
8875                                 break;
8876                         case FLASH_VENDOR_SST_SMALL:
8877                         case FLASH_VENDOR_SST_LARGE:
8878                                 tp->nvram_jedecnum = JEDEC_SST;
8879                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
8880                                 break;
8881                 }
8882         }
8883         else {
8884                 tp->nvram_jedecnum = JEDEC_ATMEL;
8885                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
8886                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8887         }
8888 }
8889
8890 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
8891 {
8892         u32 nvcfg1;
8893
8894         nvcfg1 = tr32(NVRAM_CFG1);
8895
8896         /* NVRAM protection for TPM */
8897         if (nvcfg1 & (1 << 27))
8898                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
8899
8900         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8901                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
8902                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
8903                         tp->nvram_jedecnum = JEDEC_ATMEL;
8904                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8905                         break;
8906                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
8907                         tp->nvram_jedecnum = JEDEC_ATMEL;
8908                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8909                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8910                         break;
8911                 case FLASH_5752VENDOR_ST_M45PE10:
8912                 case FLASH_5752VENDOR_ST_M45PE20:
8913                 case FLASH_5752VENDOR_ST_M45PE40:
8914                         tp->nvram_jedecnum = JEDEC_ST;
8915                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8916                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8917                         break;
8918         }
8919
8920         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
8921                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
8922                         case FLASH_5752PAGE_SIZE_256:
8923                                 tp->nvram_pagesize = 256;
8924                                 break;
8925                         case FLASH_5752PAGE_SIZE_512:
8926                                 tp->nvram_pagesize = 512;
8927                                 break;
8928                         case FLASH_5752PAGE_SIZE_1K:
8929                                 tp->nvram_pagesize = 1024;
8930                                 break;
8931                         case FLASH_5752PAGE_SIZE_2K:
8932                                 tp->nvram_pagesize = 2048;
8933                                 break;
8934                         case FLASH_5752PAGE_SIZE_4K:
8935                                 tp->nvram_pagesize = 4096;
8936                                 break;
8937                         case FLASH_5752PAGE_SIZE_264:
8938                                 tp->nvram_pagesize = 264;
8939                                 break;
8940                 }
8941         }
8942         else {
8943                 /* For eeprom, set pagesize to maximum eeprom size */
8944                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8945
8946                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8947                 tw32(NVRAM_CFG1, nvcfg1);
8948         }
8949 }
8950
8951 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
8952 {
8953         u32 nvcfg1;
8954
8955         nvcfg1 = tr32(NVRAM_CFG1);
8956
8957         /* NVRAM protection for TPM */
8958         if (nvcfg1 & (1 << 27))
8959                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
8960
8961         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8962                 case FLASH_5755VENDOR_ATMEL_EEPROM_64KHZ:
8963                 case FLASH_5755VENDOR_ATMEL_EEPROM_376KHZ:
8964                         tp->nvram_jedecnum = JEDEC_ATMEL;
8965                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8966                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
8967
8968                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
8969                         tw32(NVRAM_CFG1, nvcfg1);
8970                         break;
8971                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
8972                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
8973                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
8974                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
8975                 case FLASH_5755VENDOR_ATMEL_FLASH_4:
8976                         tp->nvram_jedecnum = JEDEC_ATMEL;
8977                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8978                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8979                         tp->nvram_pagesize = 264;
8980                         break;
8981                 case FLASH_5752VENDOR_ST_M45PE10:
8982                 case FLASH_5752VENDOR_ST_M45PE20:
8983                 case FLASH_5752VENDOR_ST_M45PE40:
8984                         tp->nvram_jedecnum = JEDEC_ST;
8985                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
8986                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
8987                         tp->nvram_pagesize = 256;
8988                         break;
8989         }
8990 }
8991
8992 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
8993 {
8994         u32 nvcfg1;
8995
8996         nvcfg1 = tr32(NVRAM_CFG1);
8997
8998         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8999                 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
9000                 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
9001                 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
9002                 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
9003                         tp->nvram_jedecnum = JEDEC_ATMEL;
9004                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9005                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9006
9007                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9008                         tw32(NVRAM_CFG1, nvcfg1);
9009                         break;
9010                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9011                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9012                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9013                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9014                         tp->nvram_jedecnum = JEDEC_ATMEL;
9015                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9016                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9017                         tp->nvram_pagesize = 264;
9018                         break;
9019                 case FLASH_5752VENDOR_ST_M45PE10:
9020                 case FLASH_5752VENDOR_ST_M45PE20:
9021                 case FLASH_5752VENDOR_ST_M45PE40:
9022                         tp->nvram_jedecnum = JEDEC_ST;
9023                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9024                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9025                         tp->nvram_pagesize = 256;
9026                         break;
9027         }
9028 }
9029
9030 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
9031 static void __devinit tg3_nvram_init(struct tg3 *tp)
9032 {
9033         int j;
9034
9035         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
9036                 return;
9037
9038         tw32_f(GRC_EEPROM_ADDR,
9039              (EEPROM_ADDR_FSM_RESET |
9040               (EEPROM_DEFAULT_CLOCK_PERIOD <<
9041                EEPROM_ADDR_CLKPERD_SHIFT)));
9042
9043         /* XXX schedule_timeout() ... */
9044         for (j = 0; j < 100; j++)
9045                 udelay(10);
9046
9047         /* Enable seeprom accesses. */
9048         tw32_f(GRC_LOCAL_CTRL,
9049              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
9050         udelay(100);
9051
9052         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9053             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
9054                 tp->tg3_flags |= TG3_FLAG_NVRAM;
9055
9056                 if (tg3_nvram_lock(tp)) {
9057                         printk(KERN_WARNING PFX "%s: Cannot get nvram lock, "
9058                                "tg3_nvram_init failed.\n", tp->dev->name);
9059                         return;
9060                 }
9061                 tg3_enable_nvram_access(tp);
9062
9063                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9064                         tg3_get_5752_nvram_info(tp);
9065                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9066                         tg3_get_5755_nvram_info(tp);
9067                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
9068                         tg3_get_5787_nvram_info(tp);
9069                 else
9070                         tg3_get_nvram_info(tp);
9071
9072                 tg3_get_nvram_size(tp);
9073
9074                 tg3_disable_nvram_access(tp);
9075                 tg3_nvram_unlock(tp);
9076
9077         } else {
9078                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
9079
9080                 tg3_get_eeprom_size(tp);
9081         }
9082 }
9083
9084 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
9085                                         u32 offset, u32 *val)
9086 {
9087         u32 tmp;
9088         int i;
9089
9090         if (offset > EEPROM_ADDR_ADDR_MASK ||
9091             (offset % 4) != 0)
9092                 return -EINVAL;
9093
9094         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
9095                                         EEPROM_ADDR_DEVID_MASK |
9096                                         EEPROM_ADDR_READ);
9097         tw32(GRC_EEPROM_ADDR,
9098              tmp |
9099              (0 << EEPROM_ADDR_DEVID_SHIFT) |
9100              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
9101               EEPROM_ADDR_ADDR_MASK) |
9102              EEPROM_ADDR_READ | EEPROM_ADDR_START);
9103
9104         for (i = 0; i < 10000; i++) {
9105                 tmp = tr32(GRC_EEPROM_ADDR);
9106
9107                 if (tmp & EEPROM_ADDR_COMPLETE)
9108                         break;
9109                 udelay(100);
9110         }
9111         if (!(tmp & EEPROM_ADDR_COMPLETE))
9112                 return -EBUSY;
9113
9114         *val = tr32(GRC_EEPROM_DATA);
9115         return 0;
9116 }
9117
9118 #define NVRAM_CMD_TIMEOUT 10000
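     /* Each poll below waits 10us, so an NVRAM command is given roughly
      * 100ms to report NVRAM_CMD_DONE before we give up.
      */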
9119
9120 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
9121 {
9122         int i;
9123
9124         tw32(NVRAM_CMD, nvram_cmd);
9125         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
9126                 udelay(10);
9127                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
9128                         udelay(10);
9129                         break;
9130                 }
9131         }
9132         if (i == NVRAM_CMD_TIMEOUT) {
9133                 return -EBUSY;
9134         }
9135         return 0;
9136 }
9137
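     /* Atmel AT45DB0x1B DataFlash pages are 264 bytes, but the device
      * addresses them on a power-of-two boundary: the page number sits
      * above bit ATMEL_AT45DB0X1B_PAGE_POS and the byte offset within the
      * page sits below it.  tg3_nvram_phys_addr() converts a linear NVRAM
      * offset into that form; tg3_nvram_logical_addr() is the inverse.
      */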
9138 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
9139 {
9140         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9141             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9142             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9143             (tp->nvram_jedecnum == JEDEC_ATMEL))
9144
9145                 addr = ((addr / tp->nvram_pagesize) <<
9146                         ATMEL_AT45DB0X1B_PAGE_POS) +
9147                        (addr % tp->nvram_pagesize);
9148
9149         return addr;
9150 }
9151
9152 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
9153 {
9154         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9155             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9156             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9157             (tp->nvram_jedecnum == JEDEC_ATMEL))
9158
9159                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
9160                         tp->nvram_pagesize) +
9161                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
9162
9163         return addr;
9164 }
9165
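     /* Generic NVRAM read: parts without an NVRAM controller (no
      * TG3_FLAG_NVRAM) go through the legacy EEPROM state machine, all
      * others through the NVRAM command interface with the offset first
      * translated into the flash part's physical addressing.
      */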
9166 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
9167 {
9168         int ret;
9169
9170         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9171                 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
9172                 return -EINVAL;
9173         }
9174
9175         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
9176                 return tg3_nvram_read_using_eeprom(tp, offset, val);
9177
9178         offset = tg3_nvram_phys_addr(tp, offset);
9179
9180         if (offset > NVRAM_ADDR_MSK)
9181                 return -EINVAL;
9182
9183         ret = tg3_nvram_lock(tp);
9184         if (ret)
9185                 return ret;
9186
9187         tg3_enable_nvram_access(tp);
9188
9189         tw32(NVRAM_ADDR, offset);
9190         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
9191                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
9192
9193         if (ret == 0)
9194                 *val = swab32(tr32(NVRAM_RDDATA));
9195
9196         tg3_disable_nvram_access(tp);
9197
9198         tg3_nvram_unlock(tp);
9199
9200         return ret;
9201 }
9202
9203 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
9204 {
9205         int err;
9206         u32 tmp;
9207
9208         err = tg3_nvram_read(tp, offset, &tmp);
9209         *val = swab32(tmp);
9210         return err;
9211 }
9212
9213 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
9214                                     u32 offset, u32 len, u8 *buf)
9215 {
9216         int i, j, rc = 0;
9217         u32 val;
9218
9219         for (i = 0; i < len; i += 4) {
9220                 u32 addr, data;
9221
9222                 addr = offset + i;
9223
9224                 memcpy(&data, buf + i, 4);
9225
9226                 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
9227
9228                 val = tr32(GRC_EEPROM_ADDR);
9229                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
9230
9231                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
9232                         EEPROM_ADDR_READ);
9233                 tw32(GRC_EEPROM_ADDR, val |
9234                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
9235                         (addr & EEPROM_ADDR_ADDR_MASK) |
9236                         EEPROM_ADDR_START |
9237                         EEPROM_ADDR_WRITE);
9238                 
9239                 for (j = 0; j < 10000; j++) {
9240                         val = tr32(GRC_EEPROM_ADDR);
9241
9242                         if (val & EEPROM_ADDR_COMPLETE)
9243                                 break;
9244                         udelay(100);
9245                 }
9246                 if (!(val & EEPROM_ADDR_COMPLETE)) {
9247                         rc = -EBUSY;
9248                         break;
9249                 }
9250         }
9251
9252         return rc;
9253 }
9254
9255 /* offset and length are dword aligned */
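     /* Unbuffered flash parts are rewritten one whole page at a time:
      * read the page into a bounce buffer, merge in the caller's data,
      * issue a write enable, erase the page, write enable again, then
      * program the page back one word at a time with NVRAM_CMD_FIRST on
      * the first word and NVRAM_CMD_LAST on the last.
      */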
9256 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
9257                 u8 *buf)
9258 {
9259         int ret = 0;
9260         u32 pagesize = tp->nvram_pagesize;
9261         u32 pagemask = pagesize - 1;
9262         u32 nvram_cmd;
9263         u8 *tmp;
9264
9265         tmp = kmalloc(pagesize, GFP_KERNEL);
9266         if (tmp == NULL)
9267                 return -ENOMEM;
9268
9269         while (len) {
9270                 int j;
9271                 u32 phy_addr, page_off, size;
9272
9273                 phy_addr = offset & ~pagemask;
9274         
9275                 for (j = 0; j < pagesize; j += 4) {
9276                         if ((ret = tg3_nvram_read(tp, phy_addr + j,
9277                                                 (u32 *) (tmp + j))))
9278                                 break;
9279                 }
9280                 if (ret)
9281                         break;
9282
9283                 page_off = offset & pagemask;
9284                 size = pagesize;
9285                 if (len < size)
9286                         size = len;
9287
9288                 len -= size;
9289
9290                 memcpy(tmp + page_off, buf, size);
9291
9292                 offset = offset + (pagesize - page_off);
9293
9294                 tg3_enable_nvram_access(tp);
9295
9296                 /*
9297                  * Before we can erase the flash page, we need
9298                  * to issue a special "write enable" command.
9299                  */
9300                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9301
9302                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9303                         break;
9304
9305                 /* Erase the target page */
9306                 tw32(NVRAM_ADDR, phy_addr);
9307
9308                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
9309                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
9310
9311                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9312                         break;
9313
9314                 /* Issue another write enable to start the write. */
9315                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9316
9317                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9318                         break;
9319
9320                 for (j = 0; j < pagesize; j += 4) {
9321                         u32 data;
9322
9323                         data = *((u32 *) (tmp + j));
9324                         tw32(NVRAM_WRDATA, cpu_to_be32(data));
9325
9326                         tw32(NVRAM_ADDR, phy_addr + j);
9327
9328                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
9329                                 NVRAM_CMD_WR;
9330
9331                         if (j == 0)
9332                                 nvram_cmd |= NVRAM_CMD_FIRST;
9333                         else if (j == (pagesize - 4))
9334                                 nvram_cmd |= NVRAM_CMD_LAST;
9335
9336                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
9337                                 break;
9338                 }
9339                 if (ret)
9340                         break;
9341         }
9342
9343         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9344         tg3_nvram_exec_cmd(tp, nvram_cmd);
9345
9346         kfree(tmp);
9347
9348         return ret;
9349 }
9350
9351 /* offset and length are dword aligned */
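     /* Buffered parts are programmed a word at a time: NVRAM_CMD_FIRST
      * marks the first word of a page (or of the transfer) and
      * NVRAM_CMD_LAST the last word of a page or of the transfer.  Plain
      * EEPROMs (no TG3_FLG2_FLASH) get FIRST|LAST on every word.
      */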
9352 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
9353                 u8 *buf)
9354 {
9355         int i, ret = 0;
9356
9357         for (i = 0; i < len; i += 4, offset += 4) {
9358                 u32 data, page_off, phy_addr, nvram_cmd;
9359
9360                 memcpy(&data, buf + i, 4);
9361                 tw32(NVRAM_WRDATA, cpu_to_be32(data));
9362
9363                 page_off = offset % tp->nvram_pagesize;
9364
9365                 phy_addr = tg3_nvram_phys_addr(tp, offset);
9366
9367                 tw32(NVRAM_ADDR, phy_addr);
9368
9369                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
9370
9371                 if ((page_off == 0) || (i == 0))
9372                         nvram_cmd |= NVRAM_CMD_FIRST;
9373                 else if (page_off == (tp->nvram_pagesize - 4))
9374                         nvram_cmd |= NVRAM_CMD_LAST;
9375
9376                 if (i == (len - 4))
9377                         nvram_cmd |= NVRAM_CMD_LAST;
9378
9379                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
9380                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
9381                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
9382                     (tp->nvram_jedecnum == JEDEC_ST) &&
9383                     (nvram_cmd & NVRAM_CMD_FIRST)) {
9384
9385                         if ((ret = tg3_nvram_exec_cmd(tp,
9386                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
9387                                 NVRAM_CMD_DONE)))
9388
9389                                 break;
9390                 }
9391                 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9392                         /* We always do complete word writes to eeprom. */
9393                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
9394                 }
9395
9396                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
9397                         break;
9398         }
9399         return ret;
9400 }
9401
9402 /* offset and length are dword aligned */
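     /* Top-level NVRAM write: drop the eeprom write-protect GPIO if
      * needed, then either program a legacy EEPROM directly or take the
      * NVRAM lock, set GRC_MODE_NVRAM_WR_ENABLE, and dispatch to the
      * buffered or unbuffered flash path.
      */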
9403 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
9404 {
9405         int ret;
9406
9407         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9408                 printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
9409                 return -EINVAL;
9410         }
9411
9412         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9413                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
9414                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
9415                 udelay(40);
9416         }
9417
9418         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
9419                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
9420         }
9421         else {
9422                 u32 grc_mode;
9423
9424                 ret = tg3_nvram_lock(tp);
9425                 if (ret)
9426                         return ret;
9427
9428                 tg3_enable_nvram_access(tp);
9429                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
9430                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
9431                         tw32(NVRAM_WRITE1, 0x406);
9432
9433                 grc_mode = tr32(GRC_MODE);
9434                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
9435
9436                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
9437                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9438
9439                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
9440                                 buf);
9441                 }
9442                 else {
9443                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
9444                                 buf);
9445                 }
9446
9447                 grc_mode = tr32(GRC_MODE);
9448                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
9449
9450                 tg3_disable_nvram_access(tp);
9451                 tg3_nvram_unlock(tp);
9452         }
9453
9454         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9455                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9456                 udelay(40);
9457         }
9458
9459         return ret;
9460 }
9461
9462 struct subsys_tbl_ent {
9463         u16 subsys_vendor, subsys_devid;
9464         u32 phy_id;
9465 };
9466
9467 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
9468         /* Broadcom boards. */
9469         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
9470         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
9471         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
9472         { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
9473         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
9474         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
9475         { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
9476         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
9477         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
9478         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
9479         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
9480
9481         /* 3com boards. */
9482         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
9483         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
9484         { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
9485         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
9486         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
9487
9488         /* DELL boards. */
9489         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
9490         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
9491         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
9492         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
9493
9494         /* Compaq boards. */
9495         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
9496         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
9497         { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
9498         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
9499         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
9500
9501         /* IBM boards. */
9502         { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
9503 };
9504
9505 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
9506 {
9507         int i;
9508
9509         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
9510                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
9511                      tp->pdev->subsystem_vendor) &&
9512                     (subsys_id_to_phy_id[i].subsys_devid ==
9513                      tp->pdev->subsystem_device))
9514                         return &subsys_id_to_phy_id[i];
9515         }
9516         return NULL;
9517 }
9518
9519 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
9520 {
9521         u32 val;
9522         u16 pmcsr;
9523
9524         /* On some early chips the SRAM cannot be accessed in D3hot state,
9525          * so we need to make sure we're in D0.
9526          */
9527         pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
9528         pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9529         pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
9530         msleep(1);
9531
9532         /* Make sure register accesses (indirect or otherwise)
9533          * will function correctly.
9534          */
9535         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9536                                tp->misc_host_ctrl);
9537
9538         tp->phy_id = PHY_ID_INVALID;
9539         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9540
9541         /* Do not even try poking around in here on Sun parts.  */
9542         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
9543                 return;
9544
9545         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9546         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9547                 u32 nic_cfg, led_cfg;
9548                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
9549                 int eeprom_phy_serdes = 0;
9550
9551                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9552                 tp->nic_sram_data_cfg = nic_cfg;
9553
9554                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
9555                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
9556                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
9557                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
9558                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
9559                     (ver > 0) && (ver < 0x100))
9560                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
9561
9562                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
9563                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
9564                         eeprom_phy_serdes = 1;
9565
9566                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
9567                 if (nic_phy_id != 0) {
9568                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
9569                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
9570
9571                         eeprom_phy_id  = (id1 >> 16) << 10;
9572                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
9573                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
9574                 } else
9575                         eeprom_phy_id = 0;
9576
9577                 tp->phy_id = eeprom_phy_id;
9578                 if (eeprom_phy_serdes) {
9579                         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
9580                                 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
9581                         else
9582                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9583                 }
9584
9585                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9586                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
9587                                     SHASTA_EXT_LED_MODE_MASK);
9588                 else
9589                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
9590
9591                 switch (led_cfg) {
9592                 default:
9593                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
9594                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9595                         break;
9596
9597                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
9598                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9599                         break;
9600
9601                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
9602                         tp->led_ctrl = LED_CTRL_MODE_MAC;
9603
9604                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
9605                          * read on some older 5700/5701 bootcode.
9606                          */
9607                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
9608                             ASIC_REV_5700 ||
9609                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
9610                             ASIC_REV_5701)
9611                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9612
9613                         break;
9614
9615                 case SHASTA_EXT_LED_SHARED:
9616                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
9617                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
9618                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
9619                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9620                                                  LED_CTRL_MODE_PHY_2);
9621                         break;
9622
9623                 case SHASTA_EXT_LED_MAC:
9624                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
9625                         break;
9626
9627                 case SHASTA_EXT_LED_COMBO:
9628                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
9629                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
9630                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9631                                                  LED_CTRL_MODE_PHY_2);
9632                         break;
9633
9634                 };
9635
9636                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9637                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
9638                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
9639                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9640
9641                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
9642                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
9643                     (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
9644                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
9645
9646                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9647                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
9648                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9649                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
9650                 }
9651                 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
9652                         tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
9653
9654                 if (cfg2 & (1 << 17))
9655                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
9656
9657                 /* serdes signal pre-emphasis in register 0x590 is set by
9658                  * the bootcode if bit 18 is set. */
9659                 if (cfg2 & (1 << 18))
9660                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
9661         }
9662 }
9663
9664 static int __devinit tg3_phy_probe(struct tg3 *tp)
9665 {
9666         u32 hw_phy_id_1, hw_phy_id_2;
9667         u32 hw_phy_id, hw_phy_id_masked;
9668         int err;
9669
9670         /* Reading the PHY ID register can conflict with ASF
9671          * firmware access to the PHY hardware.
9672          */
9673         err = 0;
9674         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
9675                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
9676         } else {
9677                 /* Now read the physical PHY_ID from the chip and verify
9678                  * that it is sane.  If it doesn't look good, we fall back
9679                  * to the PHY_ID found in the eeprom area or, failing
9680                  * that, the hard-coded subsystem device table.
9681                  */
9682                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
9683                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
9684
9685                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
9686                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
9687                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
9688
9689                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
9690         }
9691
9692         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
9693                 tp->phy_id = hw_phy_id;
9694                 if (hw_phy_id_masked == PHY_ID_BCM8002)
9695                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9696                 else
9697                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
9698         } else {
9699                 if (tp->phy_id != PHY_ID_INVALID) {
9700                         /* Do nothing, phy ID already set up in
9701                          * tg3_get_eeprom_hw_cfg().
9702                          */
9703                 } else {
9704                         struct subsys_tbl_ent *p;
9705
9706                         /* No eeprom signature?  Try the hardcoded
9707                          * subsys device table.
9708                          */
9709                         p = lookup_by_subsys(tp);
9710                         if (!p)
9711                                 return -ENODEV;
9712
9713                         tp->phy_id = p->phy_id;
9714                         if (!tp->phy_id ||
9715                             tp->phy_id == PHY_ID_BCM8002)
9716                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9717                 }
9718         }
9719
9720         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
9721             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
9722                 u32 bmsr, adv_reg, tg3_ctrl;
9723
9724                 tg3_readphy(tp, MII_BMSR, &bmsr);
9725                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
9726                     (bmsr & BMSR_LSTATUS))
9727                         goto skip_phy_reset;
9728                     
9729                 err = tg3_phy_reset(tp);
9730                 if (err)
9731                         return err;
9732
9733                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
9734                            ADVERTISE_100HALF | ADVERTISE_100FULL |
9735                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
9736                 tg3_ctrl = 0;
9737                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
9738                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
9739                                     MII_TG3_CTRL_ADV_1000_FULL);
9740                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9741                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
9742                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
9743                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
9744                 }
9745
9746                 if (!tg3_copper_is_advertising_all(tp)) {
9747                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9748
9749                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9750                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9751
9752                         tg3_writephy(tp, MII_BMCR,
9753                                      BMCR_ANENABLE | BMCR_ANRESTART);
9754                 }
9755                 tg3_phy_set_wirespeed(tp);
9756
9757                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9758                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9759                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9760         }
9761
9762 skip_phy_reset:
9763         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
9764                 err = tg3_init_5401phy_dsp(tp);
9765                 if (err)
9766                         return err;
9767         }
9768
9769         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
9770                 err = tg3_init_5401phy_dsp(tp);
9771         }
9772
9773         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9774                 tp->link_config.advertising =
9775                         (ADVERTISED_1000baseT_Half |
9776                          ADVERTISED_1000baseT_Full |
9777                          ADVERTISED_Autoneg |
9778                          ADVERTISED_FIBRE);
9779         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9780                 tp->link_config.advertising &=
9781                         ~(ADVERTISED_1000baseT_Half |
9782                           ADVERTISED_1000baseT_Full);
9783
9784         return err;
9785 }
9786
9787 static void __devinit tg3_read_partno(struct tg3 *tp)
9788 {
9789         unsigned char vpd_data[256];
9790         int i;
9791         u32 magic;
9792
9793         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
9794                 /* Sun decided not to put the necessary bits in the
9795                  * NVRAM of their onboard tg3 parts :(
9796                  */
9797                 strcpy(tp->board_part_number, "Sun 570X");
9798                 return;
9799         }
9800
9801         if (tg3_nvram_read_swab(tp, 0x0, &magic))
9802                 return;
9803
9804         if (magic == TG3_EEPROM_MAGIC) {
9805                 for (i = 0; i < 256; i += 4) {
9806                         u32 tmp;
9807
9808                         if (tg3_nvram_read(tp, 0x100 + i, &tmp))
9809                                 goto out_not_found;
9810
9811                         vpd_data[i + 0] = ((tmp >>  0) & 0xff);
9812                         vpd_data[i + 1] = ((tmp >>  8) & 0xff);
9813                         vpd_data[i + 2] = ((tmp >> 16) & 0xff);
9814                         vpd_data[i + 3] = ((tmp >> 24) & 0xff);
9815                 }
9816         } else {
9817                 int vpd_cap;
9818
9819                 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
9820                 for (i = 0; i < 256; i += 4) {
9821                         u32 tmp, j = 0;
9822                         u16 tmp16;
9823
9824                         pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
9825                                               i);
9826                         while (j++ < 100) {
9827                                 pci_read_config_word(tp->pdev, vpd_cap +
9828                                                      PCI_VPD_ADDR, &tmp16);
9829                                 if (tmp16 & 0x8000)
9830                                         break;
9831                                 msleep(1);
9832                         }
9833                         pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
9834                                               &tmp);
9835                         tmp = cpu_to_le32(tmp);
9836                         memcpy(&vpd_data[i], &tmp, 4);
9837                 }
9838         }
9839
9840         /* Now parse and find the part number. */
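          /* The VPD image is a list of large-resource descriptors: a one
           * byte tag (0x82 identifier string, 0x90 read-only data, 0x91
           * read-write data) followed by a 16-bit little-endian length.
           * The part number lives in the 0x90 block as a "PN" keyword
           * field laid out as keyword[2], length[1], data[length].
           */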
9841         for (i = 0; i < 256; ) {
9842                 unsigned char val = vpd_data[i];
9843                 int block_end;
9844
9845                 if (val == 0x82 || val == 0x91) {
9846                         i = (i + 3 +
9847                              (vpd_data[i + 1] +
9848                               (vpd_data[i + 2] << 8)));
9849                         continue;
9850                 }
9851
9852                 if (val != 0x90)
9853                         goto out_not_found;
9854
9855                 block_end = (i + 3 +
9856                              (vpd_data[i + 1] +
9857                               (vpd_data[i + 2] << 8)));
9858                 i += 3;
9859                 while (i < block_end) {
9860                         if (vpd_data[i + 0] == 'P' &&
9861                             vpd_data[i + 1] == 'N') {
9862                                 int partno_len = vpd_data[i + 2];
9863
9864                                 if (partno_len > 24)
9865                                         goto out_not_found;
9866
9867                                 memcpy(tp->board_part_number,
9868                                        &vpd_data[i + 3],
9869                                        partno_len);
9870
9871                                 /* Success. */
9872                                 return;
9873                         }
                             /* Not "PN": skip this keyword field (2-byte
                              * keyword, 1-byte length, then the data) so
                              * the scan advances to the next field.
                              */
                             i += 3 + vpd_data[i + 2];
9874                 }
9875
9876                 /* Part number not found. */
9877                 goto out_not_found;
9878         }
9879
9880 out_not_found:
9881         strcpy(tp->board_part_number, "none");
9882 }
9883
9884 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
9885 {
9886         u32 val, offset, start;
9887
9888         if (tg3_nvram_read_swab(tp, 0, &val))
9889                 return;
9890
9891         if (val != TG3_EEPROM_MAGIC)
9892                 return;
9893
9894         if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
9895             tg3_nvram_read_swab(tp, 0x4, &start))
9896                 return;
9897
9898         offset = tg3_nvram_logical_addr(tp, offset);
9899         if (tg3_nvram_read_swab(tp, offset, &val))
9900                 return;
9901
9902         if ((val & 0xfc000000) == 0x0c000000) {
9903                 u32 ver_offset, addr;
9904                 int i;
9905
9906                 if (tg3_nvram_read_swab(tp, offset + 4, &val) ||
9907                     tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
9908                         return;
9909
9910                 if (val != 0)
9911                         return;
9912
9913                 addr = offset + ver_offset - start;
9914                 for (i = 0; i < 16; i += 4) {
9915                         if (tg3_nvram_read(tp, addr + i, &val))
9916                                 return;
9917
9918                         val = cpu_to_le32(val);
9919                         memcpy(tp->fw_ver + i, &val, 4);
9920                 }
9921         }
9922 }
9923
9924 #ifdef CONFIG_SPARC64
9925 static int __devinit tg3_is_sun_570X(struct tg3 *tp)
9926 {
9927         struct pci_dev *pdev = tp->pdev;
9928         struct pcidev_cookie *pcp = pdev->sysdata;
9929
9930         if (pcp != NULL) {
9931                 int node = pcp->prom_node;
9932                 u32 venid;
9933                 int err;
9934
9935                 err = prom_getproperty(node, "subsystem-vendor-id",
9936                                        (char *) &venid, sizeof(venid));
9937                 if (err == 0 || err == -1)
9938                         return 0;
9939                 if (venid == PCI_VENDOR_ID_SUN)
9940                         return 1;
9941
9942                 /* TG3 chips onboard the SunBlade-2500 don't have the
9943                  * subsystem-vendor-id set to PCI_VENDOR_ID_SUN but they
9944                  * are distinguishable from non-Sun variants by being
9945                  * named "network" by the firmware.  Non-Sun cards will
9946                  * show up as being named "ethernet".
9947                  */
9948                 if (!strcmp(pcp->prom_name, "network"))
9949                         return 1;
9950         }
9951         return 0;
9952 }
9953 #endif
9954
9955 static int __devinit tg3_get_invariants(struct tg3 *tp)
9956 {
9957         static struct pci_device_id write_reorder_chipsets[] = {
9958                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
9959                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
9960                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
9961                              PCI_DEVICE_ID_VIA_8385_0) },
9962                 { },
9963         };
9964         u32 misc_ctrl_reg;
9965         u32 cacheline_sz_reg;
9966         u32 pci_state_reg, grc_misc_cfg;
9967         u32 val;
9968         u16 pci_cmd;
9969         int err;
9970
9971 #ifdef CONFIG_SPARC64
9972         if (tg3_is_sun_570X(tp))
9973                 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
9974 #endif
9975
9976         /* Force memory write invalidate off.  If we leave it on,
9977          * then on 5700_BX chips we have to enable a workaround.
9978          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
9979          * to match the cacheline size.  The Broadcom driver has this
9980          * workaround but turns MWI off at all times, so it never uses
9981          * it.  This seems to suggest that the workaround is insufficient.
9982          */
9983         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9984         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
9985         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9986
9987         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
9988          * has the register indirect write enable bit set before
9989          * we try to access any of the MMIO registers.  It is also
9990          * critical that the PCI-X hw workaround situation is decided
9991          * before that as well.
9992          */
9993         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9994                               &misc_ctrl_reg);
9995
9996         tp->pci_chip_rev_id = (misc_ctrl_reg >>
9997                                MISC_HOST_CTRL_CHIPREV_SHIFT);
9998
9999         /* Wrong chip ID in 5752 A0. This code can be removed later
10000          * as A0 is not in production.
10001          */
10002         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
10003                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
10004
10005         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
10006          * we need to disable memory and use config. cycles
10007          * only to access all registers. The 5702/03 chips
10008          * can mistakenly decode the special cycles from the
10009          * ICH chipsets as memory write cycles, causing corruption
10010          * of register and memory space. Only certain ICH bridges
10011          * will drive special cycles with non-zero data during the
10012          * address phase which can fall within the 5703's address
10013          * range. This is not an ICH bug as the PCI spec allows
10014          * non-zero address during special cycles. However, only
10015          * these ICH bridges are known to drive non-zero addresses
10016          * during special cycles.
10017          *
10018          * Since special cycles do not cross PCI bridges, we only
10019          * enable this workaround if the 5703 is on the secondary
10020          * bus of these ICH bridges.
10021          */
10022         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
10023             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
10024                 static struct tg3_dev_id {
10025                         u32     vendor;
10026                         u32     device;
10027                         u32     rev;
10028                 } ich_chipsets[] = {
10029                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
10030                           PCI_ANY_ID },
10031                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
10032                           PCI_ANY_ID },
10033                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
10034                           0xa },
10035                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
10036                           PCI_ANY_ID },
10037                         { },
10038                 };
10039                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
10040                 struct pci_dev *bridge = NULL;
10041
10042                 while (pci_id->vendor != 0) {
10043                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
10044                                                 bridge);
10045                         if (!bridge) {
10046                                 pci_id++;
10047                                 continue;
10048                         }
10049                         if (pci_id->rev != PCI_ANY_ID) {
10050                                 u8 rev;
10051
10052                                 pci_read_config_byte(bridge, PCI_REVISION_ID,
10053                                                      &rev);
10054                                 if (rev > pci_id->rev)
10055                                         continue;
10056                         }
10057                         if (bridge->subordinate &&
10058                             (bridge->subordinate->number ==
10059                              tp->pdev->bus->number)) {
10060
10061                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
10062                                 pci_dev_put(bridge);
10063                                 break;
10064                         }
10065                 }
10066         }
10067
10068         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
10069          * DMA addresses > 40-bit. This bridge may have additional
10070          * 57xx devices behind it, in some 4-port NIC designs for example.
10071          * Any tg3 device found behind the bridge will also need the 40-bit
10072          * DMA workaround.
10073          */
10074         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
10075             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10076                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
10077                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10078                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
10079         }
10080         else {
10081                 struct pci_dev *bridge = NULL;
10082
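                      /* Walk every ServerWorks EPB bridge in the system and
                       * flag the 40-bit DMA bug if our device sits on a bus
                       * within the bridge's secondary..subordinate range.
                       */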
10083                 do {
10084                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
10085                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
10086                                                 bridge);
10087                         if (bridge && bridge->subordinate &&
10088                             (bridge->subordinate->number <=
10089                              tp->pdev->bus->number) &&
10090                             (bridge->subordinate->subordinate >=
10091                              tp->pdev->bus->number)) {
10092                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10093                                 pci_dev_put(bridge);
10094                                 break;
10095                         }
10096                 } while (bridge);
10097         }
10098
10099         /* Initialize misc host control in PCI block. */
10100         tp->misc_host_ctrl |= (misc_ctrl_reg &
10101                                MISC_HOST_CTRL_CHIPREV);
10102         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10103                                tp->misc_host_ctrl);
10104
10105         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10106                               &cacheline_sz_reg);
10107
10108         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
10109         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
10110         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
10111         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
10112
10113         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
10114             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10115             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10116             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10117             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10118                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
10119
10120         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
10121             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
10122                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
10123
10124         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
10125                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10126                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) {
10127                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
10128                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
10129                 } else
10130                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1;
10131         }
10132
10133         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
10134             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
10135             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
10136             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
10137             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787)
10138                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
10139
10140         if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
10141                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
10142
10143         /* If we have an AMD 762 or VIA K8T800 chipset, write
10144          * reordering to the mailbox registers done by the host
10145          * controller can cause major troubles.  We read back from
10146          * every mailbox register write to force the writes to be
10147          * posted to the chip in order.
10148          */
10149         if (pci_dev_present(write_reorder_chipsets) &&
10150             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10151                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
10152
10153         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10154             tp->pci_lat_timer < 64) {
10155                 tp->pci_lat_timer = 64;
10156
10157                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
10158                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
10159                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
10160                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
10161
10162                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10163                                        cacheline_sz_reg);
10164         }
10165
10166         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10167                               &pci_state_reg);
10168
10169         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
10170                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
10171
10172                 /* If this is a 5700 BX chipset, and we are in PCI-X
10173                  * mode, enable register write workaround.
10174                  *
10175                  * The workaround is to use indirect register accesses
10176                  * for all chip writes not to mailbox registers.
10177                  */
10178                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
10179                         u32 pm_reg;
10180                         u16 pci_cmd;
10181
10182                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10183
10184                         /* The chip can have its power management PCI config
10185                          * space registers clobbered due to this bug.
10186                          * So explicitly force the chip into D0 here.
10187                          */
10188                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10189                                               &pm_reg);
10190                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
10191                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
10192                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10193                                                pm_reg);
10194
10195                         /* Also, force SERR#/PERR# in PCI command. */
10196                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10197                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
10198                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10199                 }
10200         }
10201
10202         /* 5700 BX chips need to have their TX producer index mailboxes
10203          * written twice to work around a bug.
10204          */
10205         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
10206                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
10207
10208         /* Back to back register writes can cause problems on this chip;
10209          * the workaround is to read back all reg writes except those to
10210          * mailbox regs.  See tg3_write_indirect_reg32().
10211          *
10212          * PCI Express 5750_A0 rev chips need this workaround too.
10213          */
10214         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10215             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
10216              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
10217                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
10218
10219         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
10220                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
10221         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
10222                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
10223
10224         /* Chip-specific fixup from Broadcom driver */
10225         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
10226             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
10227                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
10228                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
10229         }
10230
10231         /* Default fast path register access methods */
10232         tp->read32 = tg3_read32;
10233         tp->write32 = tg3_write32;
10234         tp->read32_mbox = tg3_read32;
10235         tp->write32_mbox = tg3_write32;
10236         tp->write32_tx_mbox = tg3_write32;
10237         tp->write32_rx_mbox = tg3_write32;
10238
10239         /* Various workaround register access methods */
10240         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
10241                 tp->write32 = tg3_write_indirect_reg32;
10242         else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
10243                 tp->write32 = tg3_write_flush_reg32;
10244
10245         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
10246             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
10247                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10248                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
10249                         tp->write32_rx_mbox = tg3_write_flush_reg32;
10250         }
10251
10252         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
10253                 tp->read32 = tg3_read_indirect_reg32;
10254                 tp->write32 = tg3_write_indirect_reg32;
10255                 tp->read32_mbox = tg3_read_indirect_mbox;
10256                 tp->write32_mbox = tg3_write_indirect_mbox;
10257                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
10258                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
10259
10260                 iounmap(tp->regs);
10261                 tp->regs = NULL;
10262
10263                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10264                 pci_cmd &= ~PCI_COMMAND_MEMORY;
10265                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10266         }
10267
10268         /* Get eeprom hw config before calling tg3_set_power_state().
10269          * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
10270          * determined before calling tg3_set_power_state() so that
10271          * we know whether or not to switch out of Vaux power.
10272          * When the flag is set, it means that GPIO1 is used for eeprom
10273          * write protect and also implies that it is a LOM where GPIOs
10274          * are not used to switch power.
10275          */ 
10276         tg3_get_eeprom_hw_cfg(tp);
10277
10278         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
10279          * GPIO1 driven high will bring 5700's external PHY out of reset.
10280          * It is also used as eeprom write protect on LOMs.
10281          */
10282         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
10283         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10284             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
10285                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10286                                        GRC_LCLCTRL_GPIO_OUTPUT1);
10287         /* Unused GPIO3 must be driven as output on 5752 because there
10288          * are no pull-up resistors on unused GPIO pins.
10289          */
10290         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10291                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
10292
10293         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10294                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
10295
10296         /* Force the chip into D0. */
10297         err = tg3_set_power_state(tp, PCI_D0);
10298         if (err) {
10299                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
10300                        pci_name(tp->pdev));
10301                 return err;
10302         }
10303
10304         /* 5700 B0 chips do not support checksumming correctly due
10305          * to hardware bugs.
10306          */
10307         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
10308                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
10309
10310         /* Pseudo-header checksum is done by hardware logic and not
10311          * the offload processors, so make the chip do the pseudo-
10312          * header checksums on receive.  For transmit it is more
10313          * convenient to do the pseudo-header checksum in software
10314          * as Linux does that on transmit for us in all cases.
10315          */
10316         tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
10317         tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
10318
10319         /* Derive initial jumbo mode from MTU assigned in
10320          * ether_setup() via the alloc_etherdev() call
10321          */
10322         if (tp->dev->mtu > ETH_DATA_LEN &&
10323             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10324                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
10325
10326         /* Determine WakeOnLan speed to use. */
10327         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10328             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
10329             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
10330             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
10331                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
10332         } else {
10333                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
10334         }
10335
10336         /* A few boards don't want Ethernet@WireSpeed phy feature */
10337         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10338             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
10339              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
10340              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
10341             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
10342                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
10343
10344         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
10345             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
10346                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
10347         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
10348                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
10349
10350         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
10351             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
10352             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787))
10353                 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
10354
10355         tp->coalesce_mode = 0;
10356         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
10357             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
10358                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
10359
10360         /* Initialize MAC MI mode, polling disabled. */
10361         tw32_f(MAC_MI_MODE, tp->mi_mode);
10362         udelay(80);
10363
10364         /* Initialize data/descriptor byte/word swapping. */
10365         val = tr32(GRC_MODE);
10366         val &= GRC_MODE_HOST_STACKUP;
10367         tw32(GRC_MODE, val | tp->grc_mode);
10368
10369         tg3_switch_clocks(tp);
10370
10371         /* Clear this out for sanity. */
10372         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10373
10374         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10375                               &pci_state_reg);
10376         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
10377             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
10378                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
10379
10380                 if (chiprevid == CHIPREV_ID_5701_A0 ||
10381                     chiprevid == CHIPREV_ID_5701_B0 ||
10382                     chiprevid == CHIPREV_ID_5701_B2 ||
10383                     chiprevid == CHIPREV_ID_5701_B5) {
10384                         void __iomem *sram_base;
10385
10386                         /* Write some dummy words into the SRAM status block
10387                          * area and see if they read back correctly.  If the read-back
10388                          * value is bad, force-enable the PCIX workaround.
10389                          */
10390                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
10391
10392                         writel(0x00000000, sram_base);
10393                         writel(0x00000000, sram_base + 4);
10394                         writel(0xffffffff, sram_base + 4);
10395                         if (readl(sram_base) != 0x00000000)
10396                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10397                 }
10398         }
10399
10400         udelay(50);
10401         tg3_nvram_init(tp);
10402
10403         grc_misc_cfg = tr32(GRC_MISC_CFG);
10404         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
10405
10406         /* Broadcom's driver says that CIOBE multisplit has a bug */
10407 #if 0
10408         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
10409             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
10410                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
10411                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
10412         }
10413 #endif
10414         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10415             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
10416              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
10417                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
10418
10419         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
10420             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
10421                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
10422         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
10423                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
10424                                       HOSTCC_MODE_CLRTICK_TXBD);
10425
10426                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
10427                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10428                                        tp->misc_host_ctrl);
10429         }
10430
10431         /* These boards are limited to 10/100 operation. */
10432         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10433              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
10434             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10435              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10436              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
10437               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
10438               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
10439             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10440              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
10441               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
10442                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
10443
10444         err = tg3_phy_probe(tp);
10445         if (err) {
10446                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
10447                        pci_name(tp->pdev), err);
10448                 /* ... but do not return immediately ... */
10449         }
10450
10451         tg3_read_partno(tp);
10452         tg3_read_fw_ver(tp);
10453
10454         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
10455                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10456         } else {
10457                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10458                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
10459                 else
10460                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10461         }
10462
10463         /* 5700 {AX,BX} chips have a broken status block link
10464          * change bit implementation, so we must use the
10465          * status register in those cases.
10466          */
10467         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10468                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
10469         else
10470                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
10471
10472         /* The led_ctrl is set during tg3_phy_probe; here we might
10473          * have to force the link status polling mechanism based
10474          * upon subsystem IDs.
10475          */
10476         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10477             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
10478                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
10479                                   TG3_FLAG_USE_LINKCHG_REG);
10480         }
10481
10482         /* For all SERDES we poll the MAC status register. */
10483         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10484                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
10485         else
10486                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
10487
10488         /* Chips other than the 5755 and 5787 can get confused if TX
10489          * buffers straddle the 4GB address boundary in some cases.
10490          */
10491         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10492             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
10493                 tp->dev->hard_start_xmit = tg3_start_xmit;
10494         else
10495                 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
10496
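        /* The 2-byte pad below keeps the IP header 4-byte aligned in the RX
         * buffer.  The 5701 in PCI-X mode reportedly cannot DMA to such
         * unaligned addresses (an assumption inferred from this workaround,
         * not from documentation), hence the zero offset in that case.
         */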
10497         tp->rx_offset = 2;
10498         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
10499             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
10500                 tp->rx_offset = 0;
10501
10502         /* By default, disable wake-on-lan.  User can change this
10503          * using ETHTOOL_SWOL.
10504          */
10505         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
10506
10507         return err;
10508 }
10509
10510 #ifdef CONFIG_SPARC64
10511 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
10512 {
10513         struct net_device *dev = tp->dev;
10514         struct pci_dev *pdev = tp->pdev;
10515         struct pcidev_cookie *pcp = pdev->sysdata;
10516
10517         if (pcp != NULL) {
10518                 int node = pcp->prom_node;
10519
10520                 if (prom_getproplen(node, "local-mac-address") == 6) {
10521                         prom_getproperty(node, "local-mac-address",
10522                                          dev->dev_addr, 6);
10523                         memcpy(dev->perm_addr, dev->dev_addr, 6);
10524                         return 0;
10525                 }
10526         }
10527         return -ENODEV;
10528 }
10529
10530 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
10531 {
10532         struct net_device *dev = tp->dev;
10533
10534         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
10535         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
10536         return 0;
10537 }
10538 #endif
10539
10540 static int __devinit tg3_get_device_address(struct tg3 *tp)
10541 {
10542         struct net_device *dev = tp->dev;
10543         u32 hi, lo, mac_offset;
10544         int addr_ok = 0;
10545
10546 #ifdef CONFIG_SPARC64
10547         if (!tg3_get_macaddr_sparc(tp))
10548                 return 0;
10549 #endif
10550
10551         mac_offset = 0x7c;
10552         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
10553              !(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) ||
10554             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
10555                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
10556                         mac_offset = 0xcc;
10557                 if (tg3_nvram_lock(tp))
10558                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
10559                 else
10560                         tg3_nvram_unlock(tp);
10561         }
10562
10563         /* First try to get it from MAC address mailbox. */
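        /* The 0x484b check below ("HK" in ASCII) appears to be the bootcode's
         * signature for a valid address in this mailbox; that interpretation
         * is inferred from the code itself rather than from documentation.
         */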
10564         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
10565         if ((hi >> 16) == 0x484b) {
10566                 dev->dev_addr[0] = (hi >>  8) & 0xff;
10567                 dev->dev_addr[1] = (hi >>  0) & 0xff;
10568
10569                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
10570                 dev->dev_addr[2] = (lo >> 24) & 0xff;
10571                 dev->dev_addr[3] = (lo >> 16) & 0xff;
10572                 dev->dev_addr[4] = (lo >>  8) & 0xff;
10573                 dev->dev_addr[5] = (lo >>  0) & 0xff;
10574
10575                 /* Some old bootcode may report a 0 MAC address in SRAM */
10576                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
10577         }
10578         if (!addr_ok) {
10579                 /* Next, try NVRAM. */
10580                 if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X) &&
10581                     !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
10582                     !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
10583                         dev->dev_addr[0] = ((hi >> 16) & 0xff);
10584                         dev->dev_addr[1] = ((hi >> 24) & 0xff);
10585                         dev->dev_addr[2] = ((lo >>  0) & 0xff);
10586                         dev->dev_addr[3] = ((lo >>  8) & 0xff);
10587                         dev->dev_addr[4] = ((lo >> 16) & 0xff);
10588                         dev->dev_addr[5] = ((lo >> 24) & 0xff);
10589                 }
10590                 /* Finally just fetch it out of the MAC control regs. */
10591                 else {
10592                         hi = tr32(MAC_ADDR_0_HIGH);
10593                         lo = tr32(MAC_ADDR_0_LOW);
10594
10595                         dev->dev_addr[5] = lo & 0xff;
10596                         dev->dev_addr[4] = (lo >> 8) & 0xff;
10597                         dev->dev_addr[3] = (lo >> 16) & 0xff;
10598                         dev->dev_addr[2] = (lo >> 24) & 0xff;
10599                         dev->dev_addr[1] = hi & 0xff;
10600                         dev->dev_addr[0] = (hi >> 8) & 0xff;
10601                 }
10602         }
10603
10604         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
10605 #ifdef CONFIG_SPARC64
10606                 if (!tg3_get_default_macaddr_sparc(tp))
10607                         return 0;
10608 #endif
10609                 return -EINVAL;
10610         }
10611         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
10612         return 0;
10613 }
10614
10615 #define BOUNDARY_SINGLE_CACHELINE       1
10616 #define BOUNDARY_MULTI_CACHELINE        2
10617
10618 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
10619 {
10620         int cacheline_size;
10621         u8 byte;
10622         int goal;
10623
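        /* PCI_CACHE_LINE_SIZE holds the cache line size in 32-bit words,
         * hence the multiply by 4; a value of 0 means the firmware never
         * programmed it, so fall back to assuming a large 1024-byte line.
         */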
10624         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
10625         if (byte == 0)
10626                 cacheline_size = 1024;
10627         else
10628                 cacheline_size = (int) byte * 4;
10629
10630         /* On 5703 and later chips, the boundary bits have no
10631          * effect.
10632          */
10633         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10634             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
10635             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10636                 goto out;
10637
10638 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
10639         goal = BOUNDARY_MULTI_CACHELINE;
10640 #else
10641 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
10642         goal = BOUNDARY_SINGLE_CACHELINE;
10643 #else
10644         goal = 0;
10645 #endif
10646 #endif
10647
10648         if (!goal)
10649                 goto out;
10650
10651         /* PCI controllers on most RISC systems tend to disconnect
10652          * when a device tries to burst across a cache-line boundary.
10653          * Therefore, letting tg3 do so just wastes PCI bandwidth.
10654          *
10655          * Unfortunately, for PCI-E there are only limited
10656          * write-side controls for this, and thus for reads
10657          * we will still get the disconnects.  We'll also waste
10658          * these PCI cycles for both read and write for chips
10659          * other than 5700 and 5701 which do not implement the
10660          * boundary bits.
10661          */
10662         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10663             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
10664                 switch (cacheline_size) {
10665                 case 16:
10666                 case 32:
10667                 case 64:
10668                 case 128:
10669                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10670                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
10671                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
10672                         } else {
10673                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10674                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10675                         }
10676                         break;
10677
10678                 case 256:
10679                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
10680                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
10681                         break;
10682
10683                 default:
10684                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10685                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10686                         break;
10687                 }
10688         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10689                 switch (cacheline_size) {
10690                 case 16:
10691                 case 32:
10692                 case 64:
10693                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10694                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10695                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
10696                                 break;
10697                         }
10698                         /* fallthrough */
10699                 case 128:
10700                 default:
10701                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10702                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
10703                         break;
10704                 }
10705         } else {
10706                 switch (cacheline_size) {
10707                 case 16:
10708                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10709                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
10710                                         DMA_RWCTRL_WRITE_BNDRY_16);
10711                                 break;
10712                         }
10713                         /* fallthrough */
10714                 case 32:
10715                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10716                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
10717                                         DMA_RWCTRL_WRITE_BNDRY_32);
10718                                 break;
10719                         }
10720                         /* fallthrough */
10721                 case 64:
10722                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10723                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
10724                                         DMA_RWCTRL_WRITE_BNDRY_64);
10725                                 break;
10726                         }
10727                         /* fallthrough */
10728                 case 128:
10729                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10730                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
10731                                         DMA_RWCTRL_WRITE_BNDRY_128);
10732                                 break;
10733                         }
10734                         /* fallthrough */
10735                 case 256:
10736                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
10737                                 DMA_RWCTRL_WRITE_BNDRY_256);
10738                         break;
10739                 case 512:
10740                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
10741                                 DMA_RWCTRL_WRITE_BNDRY_512);
10742                         break;
10743                 case 1024:
10744                 default:
10745                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
10746                                 DMA_RWCTRL_WRITE_BNDRY_1024);
10747                         break;
10748                 }
10749         }
10750
10751 out:
10752         return val;
10753 }
10754
10755 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
10756 {
10757         struct tg3_internal_buffer_desc test_desc;
10758         u32 sram_dma_descs;
10759         int i, ret;
10760
10761         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
10762
10763         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
10764         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
10765         tw32(RDMAC_STATUS, 0);
10766         tw32(WDMAC_STATUS, 0);
10767
10768         tw32(BUFMGR_MODE, 0);
10769         tw32(FTQ_RESET, 0);
10770
10771         test_desc.addr_hi = ((u64) buf_dma) >> 32;
10772         test_desc.addr_lo = buf_dma & 0xffffffff;
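        /* 0x00002100 is presumably the NIC-internal mbuf address where the
         * test data is staged; the disabled validation block further down
         * reads the data back from the same 0x2100 offset.
         */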
10773         test_desc.nic_mbuf = 0x00002100;
10774         test_desc.len = size;
10775
10776         /*
10777          * HP ZX1 systems were seeing test failures for 5701 cards running
10778          * at 33MHz the *second* time the tg3 driver was loaded after an
10779          * initial scan.
10780          *
10781          * Broadcom tells me:
10782          *   ...the DMA engine is connected to the GRC block and a DMA
10783          *   reset may affect the GRC block in some unpredictable way...
10784          *   The behavior of resets to individual blocks has not been tested.
10785          *
10786          * Broadcom noted the GRC reset will also reset all sub-components.
10787          */
10788         if (to_device) {
10789                 test_desc.cqid_sqid = (13 << 8) | 2;
10790
10791                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
10792                 udelay(40);
10793         } else {
10794                 test_desc.cqid_sqid = (16 << 8) | 7;
10795
10796                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
10797                 udelay(40);
10798         }
10799         test_desc.flags = 0x00000005;
10800
10801         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
10802                 u32 val;
10803
10804                 val = *(((u32 *)&test_desc) + i);
10805                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
10806                                        sram_dma_descs + (i * sizeof(u32)));
10807                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
10808         }
10809         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
10810
10811         if (to_device) {
10812                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
10813         } else {
10814                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
10815         }
10816
10817         ret = -ENODEV;
10818         for (i = 0; i < 40; i++) {
10819                 u32 val;
10820
10821                 if (to_device)
10822                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
10823                 else
10824                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
10825                 if ((val & 0xffff) == sram_dma_descs) {
10826                         ret = 0;
10827                         break;
10828                 }
10829
10830                 udelay(100);
10831         }
10832
10833         return ret;
10834 }
10835
10836 #define TEST_BUFFER_SIZE        0x2000
10837
10838 static int __devinit tg3_test_dma(struct tg3 *tp)
10839 {
10840         dma_addr_t buf_dma;
10841         u32 *buf, saved_dma_rwctrl;
10842         int ret;
10843
10844         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
10845         if (!buf) {
10846                 ret = -ENOMEM;
10847                 goto out_nofree;
10848         }
10849
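        /* The 0x7 and 0x6 values below match the PCI bus command encodings
         * for Memory Write and Memory Read; presumably they select the
         * commands the chip issues for its DMA cycles (an assumption based
         * on the DMA_RWCTRL_PCI_*_CMD field names, not on documentation).
         */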
10850         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
10851                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
10852
10853         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
10854
10855         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10856                 /* DMA read watermark not used on PCIE */
10857                 tp->dma_rwctrl |= 0x00180000;
10858         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
10859                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
10860                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
10861                         tp->dma_rwctrl |= 0x003f0000;
10862                 else
10863                         tp->dma_rwctrl |= 0x003f000f;
10864         } else {
10865                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
10866                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
10867                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
10868
10869                         /* If the 5704 is behind the EPB bridge, we can
10870                          * do the less restrictive ONE_DMA workaround for
10871                          * better performance.
10872                          */
10873                         if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
10874                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10875                                 tp->dma_rwctrl |= 0x8000;
10876                         else if (ccval == 0x6 || ccval == 0x7)
10877                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
10878
10879                         /* Set bit 23 to enable PCIX hw bug fix */
10880                         tp->dma_rwctrl |= 0x009f0000;
10881                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
10882                         /* 5780 always in PCIX mode */
10883                         tp->dma_rwctrl |= 0x00144000;
10884                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10885                         /* 5714 always in PCIX mode */
10886                         tp->dma_rwctrl |= 0x00148000;
10887                 } else {
10888                         tp->dma_rwctrl |= 0x001b000f;
10889                 }
10890         }
10891
10892         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
10893             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
10894                 tp->dma_rwctrl &= 0xfffffff0;
10895
10896         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10897             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
10898                 /* Remove this if it causes problems for some boards. */
10899                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
10900
10901                 /* On 5700/5701 chips, we need to set this bit.
10902                  * Otherwise the chip will issue cacheline transactions
10903                  * to streamable DMA memory without all of the byte
10904                  * enables asserted.  This is an error on several
10905                  * RISC PCI controllers, in particular sparc64.
10906                  *
10907                  * On 5703/5704 chips, this bit has been reassigned
10908                  * a different meaning.  In particular, it is used
10909                  * on those chips to enable a PCI-X workaround.
10910                  */
10911                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
10912         }
10913
10914         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10915
10916 #if 0
10917         /* Unneeded, already done by tg3_get_invariants.  */
10918         tg3_switch_clocks(tp);
10919 #endif
10920
10921         ret = 0;
10922         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10923             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
10924                 goto out;
10925
10926         /* It is best to perform the DMA test with the maximum write burst size
10927          * to expose the 5700/5701 write DMA bug.
10928          */
10929         saved_dma_rwctrl = tp->dma_rwctrl;
10930         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10931         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10932
10933         while (1) {
10934                 u32 *p = buf, i;
10935
10936                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
10937                         p[i] = i;
10938
10939                 /* Send the buffer to the chip. */
10940                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
10941                 if (ret) {
10942                         printk(KERN_ERR "tg3_test_dma() write of test buffer to device failed, err %d\n", ret);
10943                         break;
10944                 }
10945
10946 #if 0
10947                 /* validate data reached card RAM correctly. */
10948                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
10949                         u32 val;
10950                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
10951                         if (le32_to_cpu(val) != p[i]) {
10952                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
10953                                 /* ret = -ENODEV here? */
10954                         }
10955                         p[i] = 0;
10956                 }
10957 #endif
10958                 /* Now read it back. */
10959                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
10960                 if (ret) {
10961                         printk(KERN_ERR "tg3_test_dma() read of test buffer from device failed, err %d\n", ret);
10962
10963                         break;
10964                 }
10965
10966                 /* Verify it. */
10967                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
10968                         if (p[i] == i)
10969                                 continue;
10970
10971                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
10972                             DMA_RWCTRL_WRITE_BNDRY_16) {
10973                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
10974                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
10975                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10976                                 break;
10977                         } else {
10978                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
10979                                 ret = -ENODEV;
10980                                 goto out;
10981                         }
10982                 }
10983
10984                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
10985                         /* Success. */
10986                         ret = 0;
10987                         break;
10988                 }
10989         }
10990         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
10991             DMA_RWCTRL_WRITE_BNDRY_16) {
10992                 static struct pci_device_id dma_wait_state_chipsets[] = {
10993                         { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
10994                                      PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
10995                         { },
10996                 };
10997
10998                 /* DMA test passed without adjusting the DMA boundary;
10999                  * now look for chipsets that are known to expose the
11000                  * DMA bug without failing the test.
11001                  */
11002                 if (pci_dev_present(dma_wait_state_chipsets)) {
11003                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11004                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
11005                 }
11006                 else
11007                         /* Safe to use the calculated DMA boundary. */
11008                         tp->dma_rwctrl = saved_dma_rwctrl;
11009
11010                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11011         }
11012
11013 out:
11014         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
11015 out_nofree:
11016         return ret;
11017 }
11018
11019 static void __devinit tg3_init_link_config(struct tg3 *tp)
11020 {
11021         tp->link_config.advertising =
11022                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11023                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11024                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
11025                  ADVERTISED_Autoneg | ADVERTISED_MII);
11026         tp->link_config.speed = SPEED_INVALID;
11027         tp->link_config.duplex = DUPLEX_INVALID;
11028         tp->link_config.autoneg = AUTONEG_ENABLE;
11029         tp->link_config.active_speed = SPEED_INVALID;
11030         tp->link_config.active_duplex = DUPLEX_INVALID;
11031         tp->link_config.phy_is_low_power = 0;
11032         tp->link_config.orig_speed = SPEED_INVALID;
11033         tp->link_config.orig_duplex = DUPLEX_INVALID;
11034         tp->link_config.orig_autoneg = AUTONEG_INVALID;
11035 }
11036
11037 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
11038 {
11039         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11040                 tp->bufmgr_config.mbuf_read_dma_low_water =
11041                         DEFAULT_MB_RDMA_LOW_WATER_5705;
11042                 tp->bufmgr_config.mbuf_mac_rx_low_water =
11043                         DEFAULT_MB_MACRX_LOW_WATER_5705;
11044                 tp->bufmgr_config.mbuf_high_water =
11045                         DEFAULT_MB_HIGH_WATER_5705;
11046
11047                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11048                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
11049                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11050                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
11051                 tp->bufmgr_config.mbuf_high_water_jumbo =
11052                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
11053         } else {
11054                 tp->bufmgr_config.mbuf_read_dma_low_water =
11055                         DEFAULT_MB_RDMA_LOW_WATER;
11056                 tp->bufmgr_config.mbuf_mac_rx_low_water =
11057                         DEFAULT_MB_MACRX_LOW_WATER;
11058                 tp->bufmgr_config.mbuf_high_water =
11059                         DEFAULT_MB_HIGH_WATER;
11060
11061                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11062                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
11063                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11064                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
11065                 tp->bufmgr_config.mbuf_high_water_jumbo =
11066                         DEFAULT_MB_HIGH_WATER_JUMBO;
11067         }
11068
11069         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
11070         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
11071 }
11072
11073 static char * __devinit tg3_phy_string(struct tg3 *tp)
11074 {
11075         switch (tp->phy_id & PHY_ID_MASK) {
11076         case PHY_ID_BCM5400:    return "5400";
11077         case PHY_ID_BCM5401:    return "5401";
11078         case PHY_ID_BCM5411:    return "5411";
11079         case PHY_ID_BCM5701:    return "5701";
11080         case PHY_ID_BCM5703:    return "5703";
11081         case PHY_ID_BCM5704:    return "5704";
11082         case PHY_ID_BCM5705:    return "5705";
11083         case PHY_ID_BCM5750:    return "5750";
11084         case PHY_ID_BCM5752:    return "5752";
11085         case PHY_ID_BCM5714:    return "5714";
11086         case PHY_ID_BCM5780:    return "5780";
11087         case PHY_ID_BCM5755:    return "5755";
11088         case PHY_ID_BCM5787:    return "5787";
11089         case PHY_ID_BCM8002:    return "8002/serdes";
11090         case 0:                 return "serdes";
11091         default:                return "unknown";
11092         }
11093 }
11094
11095 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
11096 {
11097         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11098                 strcpy(str, "PCI Express");
11099                 return str;
11100         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
11101                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
11102
11103                 strcpy(str, "PCIX:");
11104
11105                 if ((clock_ctrl == 7) ||
11106                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
11107                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
11108                         strcat(str, "133MHz");
11109                 else if (clock_ctrl == 0)
11110                         strcat(str, "33MHz");
11111                 else if (clock_ctrl == 2)
11112                         strcat(str, "50MHz");
11113                 else if (clock_ctrl == 4)
11114                         strcat(str, "66MHz");
11115                 else if (clock_ctrl == 6)
11116                         strcat(str, "100MHz");
11117         } else {
11118                 strcpy(str, "PCI:");
11119                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
11120                         strcat(str, "66MHz");
11121                 else
11122                         strcat(str, "33MHz");
11123         }
11124         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
11125                 strcat(str, ":32-bit");
11126         else
11127                 strcat(str, ":64-bit");
11128         return str;
11129 }
11130
11131 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
11132 {
11133         struct pci_dev *peer;
11134         unsigned int func, devnr = tp->pdev->devfn & ~7;
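        /* devfn packs the device (slot) number in the upper five bits and
         * the function number in the low three, so masking with ~7 yields
         * function 0 of our own slot and OR-ing in func walks functions 0-7.
         */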
11135
11136         for (func = 0; func < 8; func++) {
11137                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
11138                 if (peer && peer != tp->pdev)
11139                         break;
11140                 pci_dev_put(peer);
11141         }
11142         /* The 5704 can be configured in single-port mode; set peer to
11143          * tp->pdev in that case.
11144          */
11145         if (!peer) {
11146                 peer = tp->pdev;
11147                 return peer;
11148         }
11149
11150         /*
11151          * We don't need to keep the refcount elevated; there's no way
11152          * to remove one half of this device without removing the other
11153          */
11154         pci_dev_put(peer);
11155
11156         return peer;
11157 }
11158
11159 static void __devinit tg3_init_coal(struct tg3 *tp)
11160 {
11161         struct ethtool_coalesce *ec = &tp->coal;
11162
11163         memset(ec, 0, sizeof(*ec));
11164         ec->cmd = ETHTOOL_GCOALESCE;
11165         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
11166         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
11167         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
11168         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
11169         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
11170         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
11171         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
11172         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
11173         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
11174
11175         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
11176                                  HOSTCC_MODE_CLRTICK_TXBD)) {
11177                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
11178                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
11179                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
11180                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
11181         }
11182
11183         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11184                 ec->rx_coalesce_usecs_irq = 0;
11185                 ec->tx_coalesce_usecs_irq = 0;
11186                 ec->stats_block_coalesce_usecs = 0;
11187         }
11188 }
11189
11190 static int __devinit tg3_init_one(struct pci_dev *pdev,
11191                                   const struct pci_device_id *ent)
11192 {
11193         static int tg3_version_printed = 0;
11194         unsigned long tg3reg_base, tg3reg_len;
11195         struct net_device *dev;
11196         struct tg3 *tp;
11197         int i, err, pm_cap;
11198         char str[40];
11199         u64 dma_mask, persist_dma_mask;
11200
11201         if (tg3_version_printed++ == 0)
11202                 printk(KERN_INFO "%s", version);
11203
11204         err = pci_enable_device(pdev);
11205         if (err) {
11206                 printk(KERN_ERR PFX "Cannot enable PCI device, "
11207                        "aborting.\n");
11208                 return err;
11209         }
11210
11211         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11212                 printk(KERN_ERR PFX "Cannot find proper PCI device "
11213                        "base address, aborting.\n");
11214                 err = -ENODEV;
11215                 goto err_out_disable_pdev;
11216         }
11217
11218         err = pci_request_regions(pdev, DRV_MODULE_NAME);
11219         if (err) {
11220                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
11221                        "aborting.\n");
11222                 goto err_out_disable_pdev;
11223         }
11224
11225         pci_set_master(pdev);
11226
11227         /* Find power-management capability. */
11228         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11229         if (pm_cap == 0) {
11230                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
11231                        "aborting.\n");
11232                 err = -EIO;
11233                 goto err_out_free_res;
11234         }
11235
11236         tg3reg_base = pci_resource_start(pdev, 0);
11237         tg3reg_len = pci_resource_len(pdev, 0);
11238
11239         dev = alloc_etherdev(sizeof(*tp));
11240         if (!dev) {
11241                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
11242                 err = -ENOMEM;
11243                 goto err_out_free_res;
11244         }
11245
11246         SET_MODULE_OWNER(dev);
11247         SET_NETDEV_DEV(dev, &pdev->dev);
11248
11249         dev->features |= NETIF_F_LLTX;
11250 #if TG3_VLAN_TAG_USED
11251         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
11252         dev->vlan_rx_register = tg3_vlan_rx_register;
11253         dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
11254 #endif
11255
11256         tp = netdev_priv(dev);
11257         tp->pdev = pdev;
11258         tp->dev = dev;
11259         tp->pm_cap = pm_cap;
11260         tp->mac_mode = TG3_DEF_MAC_MODE;
11261         tp->rx_mode = TG3_DEF_RX_MODE;
11262         tp->tx_mode = TG3_DEF_TX_MODE;
11263         tp->mi_mode = MAC_MI_MODE_BASE;
11264         if (tg3_debug > 0)
11265                 tp->msg_enable = tg3_debug;
11266         else
11267                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
11268
11269         /* The word/byte swap controls here control register access byte
11270          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
11271          * setting below.
11272          */
11273         tp->misc_host_ctrl =
11274                 MISC_HOST_CTRL_MASK_PCI_INT |
11275                 MISC_HOST_CTRL_WORD_SWAP |
11276                 MISC_HOST_CTRL_INDIR_ACCESS |
11277                 MISC_HOST_CTRL_PCISTATE_RW;
11278
11279         /* The NONFRM (non-frame) byte/word swap controls take effect
11280          * on descriptor entries, anything which isn't packet data.
11281          *
11282          * The StrongARM chips on the board (one for tx, one for rx)
11283          * are running in big-endian mode.
11284          */
11285         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
11286                         GRC_MODE_WSWAP_NONFRM_DATA);
11287 #ifdef __BIG_ENDIAN
11288         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
11289 #endif
11290         spin_lock_init(&tp->lock);
11291         spin_lock_init(&tp->tx_lock);
11292         spin_lock_init(&tp->indirect_lock);
11293         INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
11294
11295         tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
11296         if (!tp->regs) {
11297                 printk(KERN_ERR PFX "Cannot map device registers, "
11298                        "aborting.\n");
11299                 err = -ENOMEM;
11300                 goto err_out_free_dev;
11301         }
11302
11303         tg3_init_link_config(tp);
11304
11305         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
11306         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
11307         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
11308
11309         dev->open = tg3_open;
11310         dev->stop = tg3_close;
11311         dev->get_stats = tg3_get_stats;
11312         dev->set_multicast_list = tg3_set_rx_mode;
11313         dev->set_mac_address = tg3_set_mac_addr;
11314         dev->do_ioctl = tg3_ioctl;
11315         dev->tx_timeout = tg3_tx_timeout;
11316         dev->poll = tg3_poll;
11317         dev->ethtool_ops = &tg3_ethtool_ops;
11318         dev->weight = 64;
11319         dev->watchdog_timeo = TG3_TX_TIMEOUT;
11320         dev->change_mtu = tg3_change_mtu;
11321         dev->irq = pdev->irq;
11322 #ifdef CONFIG_NET_POLL_CONTROLLER
11323         dev->poll_controller = tg3_poll_controller;
11324 #endif
11325
11326         err = tg3_get_invariants(tp);
11327         if (err) {
11328                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
11329                        "aborting.\n");
11330                 goto err_out_iounmap;
11331         }
11332
11333         /* The EPB bridge inside 5714, 5715, and 5780 and any
11334          * device behind the EPB cannot support DMA addresses > 40-bit.
11335          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
11336          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
11337          * do DMA address check in tg3_start_xmit().
11338          */
11339         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
11340                 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
11341         else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
11342                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
11343 #ifdef CONFIG_HIGHMEM
11344                 dma_mask = DMA_64BIT_MASK;
11345 #endif
11346         } else
11347                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
11348
11349         /* Configure DMA attributes. */
11350         if (dma_mask > DMA_32BIT_MASK) {
11351                 err = pci_set_dma_mask(pdev, dma_mask);
11352                 if (!err) {
11353                         dev->features |= NETIF_F_HIGHDMA;
11354                         err = pci_set_consistent_dma_mask(pdev,
11355                                                           persist_dma_mask);
11356                         if (err < 0) {
11357                                 printk(KERN_ERR PFX "Unable to obtain 64 bit "
11358                                        "DMA for consistent allocations\n");
11359                                 goto err_out_iounmap;
11360                         }
11361                 }
11362         }
11363         if (err || dma_mask == DMA_32BIT_MASK) {
11364                 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
11365                 if (err) {
11366                         printk(KERN_ERR PFX "No usable DMA configuration, "
11367                                "aborting.\n");
11368                         goto err_out_iounmap;
11369                 }
11370         }
11371
11372         tg3_init_bufmgr_config(tp);
11373
11374 #if TG3_TSO_SUPPORT != 0
11375         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
11376                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
11377         }
11378         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11379             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
11380             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
11381             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
11382                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
11383         } else {
11384                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
11385         }
11386
11387         /* TSO is on by default on chips that support hardware TSO.
11388          * Firmware TSO on older chips gives lower performance, so it
11389          * is off by default, but can be enabled using ethtool.
11390          */
11391         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
11392                 dev->features |= NETIF_F_TSO;
11393
11394 #endif
11395
11396         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
11397             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
11398             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
11399                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
11400                 tp->rx_pending = 63;
11401         }
11402
11403         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11404             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
11405                 tp->pdev_peer = tg3_find_peer(tp);
11406
11407         err = tg3_get_device_address(tp);
11408         if (err) {
11409                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
11410                        "aborting.\n");
11411                 goto err_out_iounmap;
11412         }
11413
11414         /*
11415          * Reset the chip in case the UNDI or EFI boot driver did not
11416          * shut it down cleanly.  Otherwise the DMA self test will enable
11417          * WDMAC and we will see (spurious) pending DMA on the PCI bus.
11418          */
11419         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
11420             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11421                 pci_save_state(tp->pdev);
11422                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
11423                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11424         }
11425
11426         err = tg3_test_dma(tp);
11427         if (err) {
11428                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
11429                 goto err_out_iounmap;
11430         }
11431
11432         /* Tigon3 can offload checksums for IPv4 only... and some chips
11433          * have buggy checksumming.
11434          */
11435         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
11436                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11437                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
11438                         dev->features |= NETIF_F_HW_CSUM;
11439                 else
11440                         dev->features |= NETIF_F_IP_CSUM;
11441                 dev->features |= NETIF_F_SG;
11442                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
11443         } else
11444                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
11445
11446         /* flow control autonegotiation is default behavior */
11447         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
11448
11449         tg3_init_coal(tp);
11450
11451         /* Now that we have fully setup the chip, save away a snapshot
11452          * of the PCI config space.  We need to restore this after
11453          * GRC_MISC_CFG core clock resets and some resume events.
11454          */
11455         pci_save_state(tp->pdev);
11456
11457         err = register_netdev(dev);
11458         if (err) {
11459                 printk(KERN_ERR PFX "Cannot register net device, "
11460                        "aborting.\n");
11461                 goto err_out_iounmap;
11462         }
11463
11464         pci_set_drvdata(pdev, dev);
11465
11466         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
11467                dev->name,
11468                tp->board_part_number,
11469                tp->pci_chip_rev_id,
11470                tg3_phy_string(tp),
11471                tg3_bus_string(tp, str),
11472                (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
11473
11474         for (i = 0; i < 6; i++)
11475                 printk("%2.2x%c", dev->dev_addr[i],
11476                        i == 5 ? '\n' : ':');
11477
11478         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
11479                "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
11480                "TSOcap[%d]\n",
11481                dev->name,
11482                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
11483                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
11484                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
11485                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
11486                (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
11487                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
11488                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
11489         printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
11490                dev->name, tp->dma_rwctrl,
11491                (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
11492                 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
11493
11494         netif_carrier_off(tp->dev);
11495
11496         return 0;
11497
11498 err_out_iounmap:
11499         if (tp->regs) {
11500                 iounmap(tp->regs);
11501                 tp->regs = NULL;
11502         }
11503
11504 err_out_free_dev:
11505         free_netdev(dev);
11506
11507 err_out_free_res:
11508         pci_release_regions(pdev);
11509
11510 err_out_disable_pdev:
11511         pci_disable_device(pdev);
11512         pci_set_drvdata(pdev, NULL);
11513         return err;
11514 }
11515
11516 static void __devexit tg3_remove_one(struct pci_dev *pdev)
11517 {
11518         struct net_device *dev = pci_get_drvdata(pdev);
11519
11520         if (dev) {
11521                 struct tg3 *tp = netdev_priv(dev);
11522
11523                 flush_scheduled_work();
11524                 unregister_netdev(dev);
11525                 if (tp->regs) {
11526                         iounmap(tp->regs);
11527                         tp->regs = NULL;
11528                 }
11529                 free_netdev(dev);
11530                 pci_release_regions(pdev);
11531                 pci_disable_device(pdev);
11532                 pci_set_drvdata(pdev, NULL);
11533         }
11534 }
11535
11536 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
11537 {
11538         struct net_device *dev = pci_get_drvdata(pdev);
11539         struct tg3 *tp = netdev_priv(dev);
11540         int err;
11541
11542         if (!netif_running(dev))
11543                 return 0;
11544
11545         flush_scheduled_work();
11546         tg3_netif_stop(tp);
11547
11548         del_timer_sync(&tp->timer);
11549
11550         tg3_full_lock(tp, 1);
11551         tg3_disable_ints(tp);
11552         tg3_full_unlock(tp);
11553
11554         netif_device_detach(dev);
11555
11556         tg3_full_lock(tp, 0);
11557         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11558         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
11559         tg3_full_unlock(tp);
11560
11561         err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
11562         if (err) {
11563                 tg3_full_lock(tp, 0);
11564
11565                 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11566                 tg3_init_hw(tp);
11567
11568                 tp->timer.expires = jiffies + tp->timer_offset;
11569                 add_timer(&tp->timer);
11570
11571                 netif_device_attach(dev);
11572                 tg3_netif_start(tp);
11573
11574                 tg3_full_unlock(tp);
11575         }
11576
11577         return err;
11578 }
11579
11580 static int tg3_resume(struct pci_dev *pdev)
11581 {
11582         struct net_device *dev = pci_get_drvdata(pdev);
11583         struct tg3 *tp = netdev_priv(dev);
11584         int err;
11585
11586         if (!netif_running(dev))
11587                 return 0;
11588
11589         pci_restore_state(tp->pdev);
11590
11591         err = tg3_set_power_state(tp, PCI_D0);
11592         if (err)
11593                 return err;
11594
11595         netif_device_attach(dev);
11596
11597         tg3_full_lock(tp, 0);
11598
11599         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11600         tg3_init_hw(tp);
11601
11602         tp->timer.expires = jiffies + tp->timer_offset;
11603         add_timer(&tp->timer);
11604
11605         tg3_netif_start(tp);
11606
11607         tg3_full_unlock(tp);
11608
11609         return 0;
11610 }
11611
11612 static struct pci_driver tg3_driver = {
11613         .name           = DRV_MODULE_NAME,
11614         .id_table       = tg3_pci_tbl,
11615         .probe          = tg3_init_one,
11616         .remove         = __devexit_p(tg3_remove_one),
11617         .suspend        = tg3_suspend,
11618         .resume         = tg3_resume
11619 };
11620
11621 static int __init tg3_init(void)
11622 {
11623         return pci_module_init(&tg3_driver);
11624 }
11625
11626 static void __exit tg3_cleanup(void)
11627 {
11628         pci_unregister_driver(&tg3_driver);
11629 }
11630
11631 module_init(tg3_init);
11632 module_exit(tg3_cleanup);