/*
 * Source: drivers/net/tg3.c (karo-tx-linux.git)
 * Commit context: "[TG3]: Correct 5704S flowctrl advertisements"
 */
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2007 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/in.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/if_vlan.h>
36 #include <linux/ip.h>
37 #include <linux/tcp.h>
38 #include <linux/workqueue.h>
39 #include <linux/prefetch.h>
40 #include <linux/dma-mapping.h>
41
42 #include <net/checksum.h>
43 #include <net/ip.h>
44
45 #include <asm/system.h>
46 #include <asm/io.h>
47 #include <asm/byteorder.h>
48 #include <asm/uaccess.h>
49
50 #ifdef CONFIG_SPARC
51 #include <asm/idprom.h>
52 #include <asm/prom.h>
53 #endif
54
55 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
56 #define TG3_VLAN_TAG_USED 1
57 #else
58 #define TG3_VLAN_TAG_USED 0
59 #endif
60
61 #define TG3_TSO_SUPPORT 1
62
63 #include "tg3.h"
64
65 #define DRV_MODULE_NAME         "tg3"
66 #define PFX DRV_MODULE_NAME     ": "
67 #define DRV_MODULE_VERSION      "3.86"
68 #define DRV_MODULE_RELDATE      "November 9, 2007"
69
70 #define TG3_DEF_MAC_MODE        0
71 #define TG3_DEF_RX_MODE         0
72 #define TG3_DEF_TX_MODE         0
73 #define TG3_DEF_MSG_ENABLE        \
74         (NETIF_MSG_DRV          | \
75          NETIF_MSG_PROBE        | \
76          NETIF_MSG_LINK         | \
77          NETIF_MSG_TIMER        | \
78          NETIF_MSG_IFDOWN       | \
79          NETIF_MSG_IFUP         | \
80          NETIF_MSG_RX_ERR       | \
81          NETIF_MSG_TX_ERR)
82
83 /* length of time before we decide the hardware is borked,
84  * and dev->tx_timeout() should be called to fix the problem
85  */
86 #define TG3_TX_TIMEOUT                  (5 * HZ)
87
88 /* hardware minimum and maximum for a single frame's data payload */
89 #define TG3_MIN_MTU                     60
90 #define TG3_MAX_MTU(tp) \
91         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
92
93 /* These numbers seem to be hard coded in the NIC firmware somehow.
94  * You can't change the ring sizes, but you can change where you place
95  * them in the NIC onboard memory.
96  */
97 #define TG3_RX_RING_SIZE                512
98 #define TG3_DEF_RX_RING_PENDING         200
99 #define TG3_RX_JUMBO_RING_SIZE          256
100 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
101
102 /* Do not place this n-ring entries value into the tp struct itself,
103  * we really want to expose these constants to GCC so that modulo et
104  * al.  operations are done with shifts and masks instead of with
105  * hw multiply/modulo instructions.  Another solution would be to
106  * replace things like '% foo' with '& (foo - 1)'.
107  */
108 #define TG3_RX_RCB_RING_SIZE(tp)        \
109         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
110
111 #define TG3_TX_RING_SIZE                512
112 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
113
114 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
115                                  TG3_RX_RING_SIZE)
116 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
117                                  TG3_RX_JUMBO_RING_SIZE)
118 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
119                                    TG3_RX_RCB_RING_SIZE(tp))
120 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
121                                  TG3_TX_RING_SIZE)
122 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
123
124 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
125 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
126
127 /* minimum number of free TX descriptors required to wake up TX process */
128 #define TG3_TX_WAKEUP_THRESH(tp)                ((tp)->tx_pending / 4)
129
130 /* number of ETHTOOL_GSTATS u64's */
131 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
132
133 #define TG3_NUM_TEST            6
134
/* One-line banner printed when the driver loads. */
static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Bitmap of NETIF_MSG_* categories to log. */
static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
146
/* PCI IDs this driver binds to: Broadcom Tigon3 variants plus the
 * SysKonnect, Altima and Apple boards built on the same silicon.
 * The table is terminated by the empty sentinel entry.
 */
static struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{}	/* terminating sentinel */
};
215
216 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
217
/* ethtool ETHTOOL_GSTRINGS names, one per u64 statistics counter.
 * Order must match struct tg3_ethtool_stats (TG3_NUM_STATS entries).
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};
298
/* ethtool self-test names, one per TG3_NUM_TEST self-test slot. */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};
309
310 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
311 {
312         writel(val, tp->regs + off);
313 }
314
315 static u32 tg3_read32(struct tg3 *tp, u32 off)
316 {
317         return (readl(tp->regs + off));
318 }
319
320 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
321 {
322         writel(val, tp->aperegs + off);
323 }
324
325 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
326 {
327         return (readl(tp->aperegs + off));
328 }
329
/* Write a chip register indirectly through PCI config space.
 * REG_BASE_ADDR/REG_DATA form a shared two-step window, so the pair of
 * config writes is serialized under indirect_lock with IRQs disabled.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
339
/* Write a register and read it straight back, forcing the posted PCI
 * write out to the chip before returning.
 */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);	/* flush the posted write */
}
345
/* Read a chip register indirectly through PCI config space.
 * Serialized under indirect_lock because the REG_BASE_ADDR/REG_DATA
 * window is shared with all other indirect accessors.
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
357
/* Write a mailbox register when the chip is in indirect-access mode.
 * Two mailboxes (RX return consumer, RX std producer) have dedicated
 * PCI config-space aliases and bypass the shared window; all others go
 * through REG_BASE_ADDR/REG_DATA at chip offset +0x5600.
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
387
/* Read a mailbox register through the indirect config-space window
 * (mailboxes live at chip offset +0x5600).
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
399
400 /* usec_wait specifies the wait time in usec when writing to certain registers
401  * where it is unsafe to read back the register without some delay.
402  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
403  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
404  */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods: tp->write32 is already a flavor
		 * that does not need a readback flush.
		 */
		tp->write32(tp, off, val);
	else {
		/* Posted method: write, wait, then read back to flush. */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
424
/* Write a mailbox and read it back to flush the posted write, unless a
 * chipset workaround (write reorder / ICH) forbids the readback.
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}
432
/* Write a TX mailbox.  The value is written twice on chips with the
 * TXD mailbox hardware bug, and read back on chips that may reorder
 * mailbox writes, to make sure the doorbell actually lands.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);	/* workaround: repeat the write */
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);		/* flush the posted write */
}
442
443 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
444 {
445         return (readl(tp->regs + off + GRCMBOX_BASE));
446 }
447
448 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
449 {
450         writel(val, tp->regs + off + GRCMBOX_BASE);
451 }
452
453 #define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
454 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
455 #define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
456 #define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
457 #define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)
458
459 #define tw32(reg,val)           tp->write32(tp, reg, val)
460 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
461 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
462 #define tr32(reg)               tp->read32(tp, reg)
463
/* Write a word into NIC on-board SRAM through the memory window.
 * On 5906 the statistics-block range is not writable and is silently
 * skipped.  Depending on TG3_FLAG_SRAM_USE_CONFIG the window is driven
 * either via PCI config space or via direct register writes; either
 * way the shared window is protected by indirect_lock.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
488
/* Read a word from NIC on-board SRAM through the memory window.
 * On 5906 the statistics-block range is not readable; *val is forced
 * to zero for that range.  Window access mirrors tg3_write_mem().
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
515
516 static void tg3_ape_lock_init(struct tg3 *tp)
517 {
518         int i;
519
520         /* Make sure the driver hasn't any stale locks. */
521         for (i = 0; i < 8; i++)
522                 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
523                                 APE_LOCK_GRANT_DRIVER);
524 }
525
526 static int tg3_ape_lock(struct tg3 *tp, int locknum)
527 {
528         int i, off;
529         int ret = 0;
530         u32 status;
531
532         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
533                 return 0;
534
535         switch (locknum) {
536                 case TG3_APE_LOCK_MEM:
537                         break;
538                 default:
539                         return -EINVAL;
540         }
541
542         off = 4 * locknum;
543
544         tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);
545
546         /* Wait for up to 1 millisecond to acquire lock. */
547         for (i = 0; i < 100; i++) {
548                 status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
549                 if (status == APE_LOCK_GRANT_DRIVER)
550                         break;
551                 udelay(10);
552         }
553
554         if (status != APE_LOCK_GRANT_DRIVER) {
555                 /* Revoke the lock request. */
556                 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
557                                 APE_LOCK_GRANT_DRIVER);
558
559                 ret = -EBUSY;
560         }
561
562         return ret;
563 }
564
565 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
566 {
567         int off;
568
569         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
570                 return;
571
572         switch (locknum) {
573                 case TG3_APE_LOCK_MEM:
574                         break;
575                 default:
576                         return;
577         }
578
579         off = 4 * locknum;
580         tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
581 }
582
/* Disable chip interrupts: mask the PCI INTA line in misc host control
 * and write a non-zero value to the interrupt mailbox.
 */
static void tg3_disable_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}
589
/* Force an interrupt if work may be pending: via the GRC SETINT bit
 * when a (non-tagged) status update is flagged, otherwise by kicking
 * the coalescing engine with HOSTCC_MODE_NOW.
 */
static inline void tg3_cond_int(struct tg3 *tp)
{
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
599
/* Re-enable chip interrupts and fire one if work is already pending. */
static void tg3_enable_ints(struct tg3 *tp)
{
	/* Clear irq_sync before unmasking; the barrier orders the flag
	 * update ahead of the register writes below.
	 */
	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		       (tp->last_tag << 24));
	/* One-shot MSI mode needs the mailbox written a second time. */
	if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       (tp->last_tag << 24));
	tg3_cond_int(tp);
}
614
615 static inline unsigned int tg3_has_work(struct tg3 *tp)
616 {
617         struct tg3_hw_status *sblk = tp->hw_status;
618         unsigned int work_exists = 0;
619
620         /* check for phy events */
621         if (!(tp->tg3_flags &
622               (TG3_FLAG_USE_LINKCHG_REG |
623                TG3_FLAG_POLL_SERDES))) {
624                 if (sblk->status & SD_STATUS_LINK_CHG)
625                         work_exists = 1;
626         }
627         /* check for RX/TX work to do */
628         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
629             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
630                 work_exists = 1;
631
632         return work_exists;
633 }
634
/* tg3_restart_ints
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		     tp->last_tag << 24);
	/* Order the mailbox write before any later MMIO. */
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tp))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
655
/* Quiesce the data path: stop NAPI polling and disable TX queuing. */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	napi_disable(&tp->napi);
	netif_tx_disable(tp->dev);
}
662
/* Restart the data path after tg3_netif_stop(): wake the TX queue,
 * re-enable NAPI and interrupts, and mark the status block updated so
 * any pending work is noticed immediately.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
	netif_wake_queue(tp->dev);
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after tg3_init_hw)
	 */
	napi_enable(&tp->napi);
	tp->hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
674
/* Reconfigure the chip core clock via TG3PCI_CLOCK_CTRL.
 * No-op on CPMU-equipped and 5780-class parts.  When a faster core
 * clock source is active (625 MHz on 5705+, 44 MHz otherwise) the
 * register is stepped through intermediate values before the final
 * write; each write uses a 40 usec settle time because readback right
 * after a clock change is unsafe (see _tw32_flush comment above).
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
	u32 orig_clock_ctrl;

	if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		return;

	orig_clock_ctrl = clock_ctrl;
	/* Preserve only CLKRUN control and the low divider bits. */
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
706
707 #define PHY_BUSY_LOOPS  5000
708
/* Read PHY register @reg over the MI (MDIO) interface.
 *
 * MAC auto-polling is temporarily switched off so our MI_COM
 * transaction cannot race the MAC's own polling, and restored before
 * returning.  Returns 0 with *val filled in on success, or -EBUSY if
 * the MI interface stays busy for the whole PHY_BUSY_LOOPS window.
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	/* Build the MI frame: PHY address, register, read command. */
	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll for transaction completion (BUSY bit clearing). */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);	/* re-read for data */
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	/* Restore auto-polling if we disabled it above. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
757
/* Write @val to PHY register @reg over the MI (MDIO) interface.
 *
 * On 5906, writes to MII_TG3_CTRL and MII_TG3_AUX_CTRL are silently
 * dropped (returns 0).  Auto-polling is paused around the transaction
 * as in tg3_readphy().  Returns 0 on success, -EBUSY if the MI
 * interface stays busy for the whole PHY_BUSY_LOOPS window.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	/* Build the MI frame: PHY address, register, data, write command. */
	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll for transaction completion (BUSY bit clearing). */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	/* Restore auto-polling if we disabled it above. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
806
/* Enable (@enable != 0) or disable automatic MDI/MDI-X crossover.
 * No-op on pre-5705 chips and on any serdes interface.  The 5906 EPHY
 * keeps the control bit behind a shadow-register enable; other PHYs
 * expose it through the AUX_CTRL misc shadow page.
 */
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 ephy;

		/* Open the EPHY shadow window, flip the MDIX bit, then
		 * restore the original test-register value.
		 */
		if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
			tg3_writephy(tp, MII_TG3_EPHY_TEST,
				     ephy | MII_TG3_EPHY_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
				if (enable)
					phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
				tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
			}
			tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
		}
	} else {
		/* Select the misc shadow page, then read-modify-write
		 * the force-AMDIX bit with the write-enable set.
		 */
		phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
		      MII_TG3_AUXCTL_SHDWSEL_MISC;
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			phy |= MII_TG3_AUXCTL_MISC_WREN;
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
		}
	}
}
844
/* Turn on the PHY wirespeed feature (AUX_CTRL bits 15 and 4) so the
 * link can come up at a reduced speed on marginal cabling.  No-op on
 * chips flagged TG3_FLG2_NO_ETH_WIRE_SPEED.
 */
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
		return;

	/* 0x7007 selects the relevant AUX_CTRL shadow page; then set
	 * bits 15 and 4 with a read-modify-write.
	 */
	if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
	    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     (val | (1 << 15) | (1 << 4)));
}
857
858 static int tg3_bmcr_reset(struct tg3 *tp)
859 {
860         u32 phy_control;
861         int limit, err;
862
863         /* OK, reset it, and poll the BMCR_RESET bit until it
864          * clears or we time out.
865          */
866         phy_control = BMCR_RESET;
867         err = tg3_writephy(tp, MII_BMCR, phy_control);
868         if (err != 0)
869                 return -EBUSY;
870
871         limit = 5000;
872         while (limit--) {
873                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
874                 if (err != 0)
875                         return -EBUSY;
876
877                 if ((phy_control & BMCR_RESET) == 0) {
878                         udelay(40);
879                         break;
880                 }
881                 udelay(10);
882         }
883         if (limit <= 0)
884                 return -EBUSY;
885
886         return 0;
887 }
888
889 static int tg3_wait_macro_done(struct tg3 *tp)
890 {
891         int limit = 100;
892
893         while (limit--) {
894                 u32 tmp32;
895
896                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
897                         if ((tmp32 & 0x1000) == 0)
898                                 break;
899                 }
900         }
901         if (limit <= 0)
902                 return -EBUSY;
903
904         return 0;
905 }
906
/* Write the known DSP test patterns into all four PHY channels, then
 * read them back and verify each word survived intact.
 *
 * On any macro-done timeout *resetp is set so the caller performs
 * another PHY reset before retrying.  Returns 0 when every channel
 * verifies, -EBUSY on timeout or pattern mismatch.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Address this channel's block and open it for writing. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		/* Commit the write and wait for the macro to finish. */
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-address the channel and switch to read-back mode. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Each pattern word comes back as a low/high pair;
		 * mask off the don't-care bits before comparing.
		 */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* Mismatch: leave the DSP in a known
				 * state before failing.
				 */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
972
/* Clear the DSP test pattern from all four PHY channels by writing
 * zeros through the DSP read/write port.
 *
 * Returns 0 on success, -EBUSY if the macro does not complete.
 */
static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Address the channel block and open it for writing. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		/* Commit and wait for the macro to finish. */
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}
992
/* Heavyweight PHY reset for 5703/5704/5705: reset the PHY, force
 * 1000/full master mode, then write and verify the DSP test patterns,
 * retrying up to 10 times (with a fresh reset whenever the verify
 * helper requests one via its resetp flag).
 *
 * NOTE(review): if every MII_TG3_CTRL read fails, phy9_orig is written
 * back below without ever being initialized — pre-existing behavior,
 * deliberately left untouched here.
 */
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode.  */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB.  */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access.  */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	/* Clear the test patterns regardless of the verify outcome. */
	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	/* Unblock PHY control access and leave the DSP addressed to a
	 * neutral location.
	 */
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, 0x16, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
	}
	else {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}

	/* Restore the original master/slave setting. */
	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	/* Re-enable transmitter and interrupt. */
	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
1068
1069 static void tg3_link_report(struct tg3 *);
1070
/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 *
 * After the reset, re-applies every chip-specific DSP workaround and
 * re-enables auto-MDIX and wirespeed.  Returns 0 on success or a
 * negative errno from the underlying reset helpers.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 phy_status;
	int err;

	/* 5906: pull the EPHY out of IDDQ (low-power) mode before
	 * touching it.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* BMSR is read twice: its link-status bit is latched, so the
	 * first read may return stale state.
	 */
	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	/* The reset will drop any existing link; report it now. */
	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	/* 5703/5704/5705 need the heavyweight DSP-verified reset. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
		u32 val;

		/* Kick the MAC clock back up if it was left at the
		 * 12.5MHz low-power rate.
		 */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}

		/* Disable GPHY autopowerdown. */
		tg3_writephy(tp, MII_TG3_MISC_SHDW,
			     MII_TG3_MISC_SHDW_WREN |
			     MII_TG3_MISC_SHDW_APD_SEL |
			     MII_TG3_MISC_SHDW_APD_WKTM_84MS);
	}

out:
	/* Chip-specific DSP workarounds, re-applied after every reset.
	 * The register/value pairs are vendor magic — do not reorder.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
			tg3_writephy(tp, MII_TG3_TEST1,
				     MII_TG3_TEST1_TRIM_EN | 0x4);
		} else
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
		    tg3_writephy(tp, MII_TG3_EXT_CTRL,
				 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
1195
/* Configure the GRC local-control GPIOs that switch the board between
 * main and auxiliary power.  On dual-port devices (5704/5714) the peer
 * port's WOL/ASF state is honoured too, since both ports share the
 * auxiliary power circuitry.
 */
static void tg3_frob_aux_power(struct tg3 *tp)
{
	struct tg3 *tp_peer = tp;

	/* GPIO power switching only exists on real NICs. */
	if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
		return;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);
		/* remove_one() may have been run on the peer. */
		if (!dev_peer)
			tp_peer = tp;
		else
			tp_peer = netdev_priv(dev_peer);
	}

	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		/* This port or its peer needs aux power kept alive. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			/* Defer to the peer if it is already running. */
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				    NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					 GRC_LCLCTRL_GPIO_OE1 |
					 GRC_LCLCTRL_GPIO_OE2 |
					 GRC_LCLCTRL_GPIO_OUTPUT1 |
					 GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			/* Stage the GPIO transitions, 100us apart. */
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		/* Nothing needs aux power; park the GPIOs. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}
1291
1292 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
1293 {
1294         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
1295                 return 1;
1296         else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
1297                 if (speed != SPEED_10)
1298                         return 1;
1299         } else if (speed == SPEED_10)
1300                 return 1;
1301
1302         return 0;
1303 }
1304
1305 static int tg3_setup_phy(struct tg3 *, int);
1306
1307 #define RESET_KIND_SHUTDOWN     0
1308 #define RESET_KIND_INIT         1
1309 #define RESET_KIND_SUSPEND      2
1310
1311 static void tg3_write_sig_post_reset(struct tg3 *, int);
1312 static int tg3_halt_cpu(struct tg3 *, u32);
1313 static int tg3_nvram_lock(struct tg3 *);
1314 static void tg3_nvram_unlock(struct tg3 *);
1315
/* Put the PHY into its lowest-power state ahead of a power-state
 * transition.
 *
 * Several chips must NOT have BMCR_PDOWN written because of hardware
 * bugs; those return early after the serdes/LED/clock preparation.
 */
static void tg3_power_down_phy(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		/* Serdes: only the 5704 needs special handling — hold
		 * the SG-DIG block in reset and tweak the serdes config.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906: reset the EPHY and drop it into IDDQ mode. */
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else {
		/* Force the LEDs off before powering down. */
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
		return;

	if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
		/* Drop the MAC clock to 12.5MHz while powered down. */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
1363
/* Transition the device to the given PCI power state, arming
 * wake-on-LAN and trimming clocks/PHY power on the way down.
 *
 * PCI_D0 is handled immediately (power up, switch out of Vaux) and
 * returns early; D1/D2/D3hot fall through to the full shutdown path.
 * Returns 0 on success or -EINVAL for an unknown state.
 */
static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
{
	u32 misc_host_ctrl;
	u16 power_control, power_caps;
	int pm = tp->pm_cap;

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Read-modify-write the PM control/status register: clear the
	 * state field, ack any pending PME.
	 */
	pci_read_config_word(tp->pdev,
			     pm + PCI_PM_CTRL,
			     &power_control);
	power_control |= PCI_PM_CTRL_PME_STATUS;
	power_control &= ~(PCI_PM_CTRL_STATE_MASK);
	switch (state) {
	case PCI_D0:
		power_control |= 0;
		pci_write_config_word(tp->pdev,
				      pm + PCI_PM_CTRL,
				      power_control);
		udelay(100);	/* Delay after power state change */

		/* Switch out of Vaux if it is a NIC */
		if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);

		return 0;

	case PCI_D1:
		power_control |= 1;
		break;

	case PCI_D2:
		power_control |= 2;
		break;

	case PCI_D3hot:
		power_control |= 3;
		break;

	default:
		printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
		       "requested.\n",
		       tp->dev->name, state);
		return -EINVAL;
	};

	power_control |= PCI_PM_CTRL_PME_ENABLE;

	/* Mask PCI interrupts for the remainder of the shutdown. */
	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	/* Save the link settings so resume can restore them. */
	if (tp->link_config.phy_is_low_power == 0) {
		tp->link_config.phy_is_low_power = 1;
		tp->link_config.orig_speed = tp->link_config.speed;
		tp->link_config.orig_duplex = tp->link_config.duplex;
		tp->link_config.orig_autoneg = tp->link_config.autoneg;
	}

	/* Drop copper links to 10/half autoneg for low-power mode. */
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
		tp->link_config.speed = SPEED_10;
		tp->link_config.duplex = DUPLEX_HALF;
		tp->link_config.autoneg = AUTONEG_ENABLE;
		tg3_setup_phy(tp, 0);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		int i;
		u32 val;

		/* Wait up to ~200ms for the firmware mailbox magic. */
		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	/* Tell firmware we are shutting down with WOL armed. */
	if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);

	if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
		u32 mac_mode;

		/* Keep enough of the MAC alive to catch the magic
		 * packet: pick the port mode, preserve link polarity.
		 */
		if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
			tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
			udelay(40);

			if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700) {
				u32 speed = (tp->tg3_flags &
					     TG3_FLAG_WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		/* Enable magic-packet detection if D3cold PME works. */
		if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
		     (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
			mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	/* Slow or gate clocks as far as this chip family allows. */
	if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
		   (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
		   (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
		/* do nothing */
	} else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
		     (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		/* Apply the two clock steps 40us apart. */
		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	/* Only power the PHY down when nothing needs it for wakeup. */
	if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
	    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		tg3_power_down_phy(tp);

	tg3_frob_aux_power(tp);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
			int err;

			/* Halt the RX CPU under the NVRAM lock; only
			 * unlock if the lock was actually obtained.
			 */
			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	/* Finally, set the new power state. */
	pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
	udelay(100);	/* Delay after power state change */

	return 0;
}
1588
1589 static void tg3_link_report(struct tg3 *tp)
1590 {
1591         if (!netif_carrier_ok(tp->dev)) {
1592                 if (netif_msg_link(tp))
1593                         printk(KERN_INFO PFX "%s: Link is down.\n",
1594                                tp->dev->name);
1595         } else if (netif_msg_link(tp)) {
1596                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1597                        tp->dev->name,
1598                        (tp->link_config.active_speed == SPEED_1000 ?
1599                         1000 :
1600                         (tp->link_config.active_speed == SPEED_100 ?
1601                          100 : 10)),
1602                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1603                         "full" : "half"));
1604
1605                 printk(KERN_INFO PFX
1606                        "%s: Flow control is %s for TX and %s for RX.\n",
1607                        tp->dev->name,
1608                        (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX) ?
1609                        "on" : "off",
1610                        (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) ?
1611                        "on" : "off");
1612         }
1613 }
1614
1615 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1616 {
1617         u16 miireg;
1618
1619         if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1620                 miireg = ADVERTISE_PAUSE_CAP;
1621         else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1622                 miireg = ADVERTISE_PAUSE_ASYM;
1623         else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1624                 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1625         else
1626                 miireg = 0;
1627
1628         return miireg;
1629 }
1630
1631 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1632 {
1633         u16 miireg;
1634
1635         if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1636                 miireg = ADVERTISE_1000XPAUSE;
1637         else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1638                 miireg = ADVERTISE_1000XPSE_ASYM;
1639         else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1640                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1641         else
1642                 miireg = 0;
1643
1644         return miireg;
1645 }
1646
1647 static u8 tg3_resolve_flowctrl_1000T(u16 lcladv, u16 rmtadv)
1648 {
1649         u8 cap = 0;
1650
1651         if (lcladv & ADVERTISE_PAUSE_CAP) {
1652                 if (lcladv & ADVERTISE_PAUSE_ASYM) {
1653                         if (rmtadv & LPA_PAUSE_CAP)
1654                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1655                         else if (rmtadv & LPA_PAUSE_ASYM)
1656                                 cap = TG3_FLOW_CTRL_RX;
1657                 } else {
1658                         if (rmtadv & LPA_PAUSE_CAP)
1659                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1660                 }
1661         } else if (lcladv & ADVERTISE_PAUSE_ASYM) {
1662                 if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
1663                         cap = TG3_FLOW_CTRL_TX;
1664         }
1665
1666         return cap;
1667 }
1668
1669 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1670 {
1671         u8 cap = 0;
1672
1673         if (lcladv & ADVERTISE_1000XPAUSE) {
1674                 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1675                         if (rmtadv & LPA_1000XPAUSE)
1676                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1677                         else if (rmtadv & LPA_1000XPAUSE_ASYM)
1678                                 cap = TG3_FLOW_CTRL_RX;
1679                 } else {
1680                         if (rmtadv & LPA_1000XPAUSE)
1681                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1682                 }
1683         } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1684                 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1685                         cap = TG3_FLOW_CTRL_TX;
1686         }
1687
1688         return cap;
1689 }
1690
1691 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1692 {
1693         u8 new_tg3_flags = 0;
1694         u32 old_rx_mode = tp->rx_mode;
1695         u32 old_tx_mode = tp->tx_mode;
1696
1697         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1698                 if (tp->tg3_flags2 & (TG3_FLG2_MII_SERDES|TG3_FLG2_HW_AUTONEG))
1699                         new_tg3_flags = tg3_resolve_flowctrl_1000X(local_adv,
1700                                                                    remote_adv);
1701                 else
1702                         new_tg3_flags = tg3_resolve_flowctrl_1000T(local_adv,
1703                                                                    remote_adv);
1704         } else {
1705                 new_tg3_flags = tp->link_config.flowctrl;
1706         }
1707
1708         tp->link_config.active_flowctrl = new_tg3_flags;
1709
1710         if (new_tg3_flags & TG3_FLOW_CTRL_RX)
1711                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1712         else
1713                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1714
1715         if (old_rx_mode != tp->rx_mode) {
1716                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1717         }
1718
1719         if (new_tg3_flags & TG3_FLOW_CTRL_TX)
1720                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1721         else
1722                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1723
1724         if (old_tx_mode != tp->tx_mode) {
1725                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1726         }
1727 }
1728
1729 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1730 {
1731         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1732         case MII_TG3_AUX_STAT_10HALF:
1733                 *speed = SPEED_10;
1734                 *duplex = DUPLEX_HALF;
1735                 break;
1736
1737         case MII_TG3_AUX_STAT_10FULL:
1738                 *speed = SPEED_10;
1739                 *duplex = DUPLEX_FULL;
1740                 break;
1741
1742         case MII_TG3_AUX_STAT_100HALF:
1743                 *speed = SPEED_100;
1744                 *duplex = DUPLEX_HALF;
1745                 break;
1746
1747         case MII_TG3_AUX_STAT_100FULL:
1748                 *speed = SPEED_100;
1749                 *duplex = DUPLEX_FULL;
1750                 break;
1751
1752         case MII_TG3_AUX_STAT_1000HALF:
1753                 *speed = SPEED_1000;
1754                 *duplex = DUPLEX_HALF;
1755                 break;
1756
1757         case MII_TG3_AUX_STAT_1000FULL:
1758                 *speed = SPEED_1000;
1759                 *duplex = DUPLEX_FULL;
1760                 break;
1761
1762         default:
1763                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1764                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
1765                                  SPEED_10;
1766                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
1767                                   DUPLEX_HALF;
1768                         break;
1769                 }
1770                 *speed = SPEED_INVALID;
1771                 *duplex = DUPLEX_INVALID;
1772                 break;
1773         };
1774 }
1775
1776 static void tg3_phy_copper_begin(struct tg3 *tp)
1777 {
1778         u32 new_adv;
1779         int i;
1780
1781         if (tp->link_config.phy_is_low_power) {
1782                 /* Entering low power mode.  Disable gigabit and
1783                  * 100baseT advertisements.
1784                  */
1785                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1786
1787                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1788                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1789                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1790                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1791
1792                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1793         } else if (tp->link_config.speed == SPEED_INVALID) {
1794                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1795                         tp->link_config.advertising &=
1796                                 ~(ADVERTISED_1000baseT_Half |
1797                                   ADVERTISED_1000baseT_Full);
1798
1799                 new_adv = ADVERTISE_CSMA;
1800                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1801                         new_adv |= ADVERTISE_10HALF;
1802                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1803                         new_adv |= ADVERTISE_10FULL;
1804                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1805                         new_adv |= ADVERTISE_100HALF;
1806                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1807                         new_adv |= ADVERTISE_100FULL;
1808
1809                 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
1810
1811                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1812
1813                 if (tp->link_config.advertising &
1814                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1815                         new_adv = 0;
1816                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1817                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1818                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1819                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1820                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1821                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1822                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1823                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1824                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1825                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1826                 } else {
1827                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1828                 }
1829         } else {
1830                 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
1831                 new_adv |= ADVERTISE_CSMA;
1832
1833                 /* Asking for a specific link mode. */
1834                 if (tp->link_config.speed == SPEED_1000) {
1835                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1836
1837                         if (tp->link_config.duplex == DUPLEX_FULL)
1838                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1839                         else
1840                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1841                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1842                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1843                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1844                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1845                 } else {
1846                         if (tp->link_config.speed == SPEED_100) {
1847                                 if (tp->link_config.duplex == DUPLEX_FULL)
1848                                         new_adv |= ADVERTISE_100FULL;
1849                                 else
1850                                         new_adv |= ADVERTISE_100HALF;
1851                         } else {
1852                                 if (tp->link_config.duplex == DUPLEX_FULL)
1853                                         new_adv |= ADVERTISE_10FULL;
1854                                 else
1855                                         new_adv |= ADVERTISE_10HALF;
1856                         }
1857                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1858
1859                         new_adv = 0;
1860                 }
1861
1862                 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1863         }
1864
1865         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1866             tp->link_config.speed != SPEED_INVALID) {
1867                 u32 bmcr, orig_bmcr;
1868
1869                 tp->link_config.active_speed = tp->link_config.speed;
1870                 tp->link_config.active_duplex = tp->link_config.duplex;
1871
1872                 bmcr = 0;
1873                 switch (tp->link_config.speed) {
1874                 default:
1875                 case SPEED_10:
1876                         break;
1877
1878                 case SPEED_100:
1879                         bmcr |= BMCR_SPEED100;
1880                         break;
1881
1882                 case SPEED_1000:
1883                         bmcr |= TG3_BMCR_SPEED1000;
1884                         break;
1885                 };
1886
1887                 if (tp->link_config.duplex == DUPLEX_FULL)
1888                         bmcr |= BMCR_FULLDPLX;
1889
1890                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1891                     (bmcr != orig_bmcr)) {
1892                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1893                         for (i = 0; i < 1500; i++) {
1894                                 u32 tmp;
1895
1896                                 udelay(10);
1897                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1898                                     tg3_readphy(tp, MII_BMSR, &tmp))
1899                                         continue;
1900                                 if (!(tmp & BMSR_LSTATUS)) {
1901                                         udelay(40);
1902                                         break;
1903                                 }
1904                         }
1905                         tg3_writephy(tp, MII_BMCR, bmcr);
1906                         udelay(40);
1907                 }
1908         } else {
1909                 tg3_writephy(tp, MII_BMCR,
1910                              BMCR_ANENABLE | BMCR_ANRESTART);
1911         }
1912 }
1913
1914 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1915 {
1916         int err;
1917
1918         /* Turn off tap power management. */
1919         /* Set Extended packet length bit */
1920         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1921
1922         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1923         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1924
1925         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1926         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1927
1928         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1929         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1930
1931         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1932         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1933
1934         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1935         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1936
1937         udelay(40);
1938
1939         return err;
1940 }
1941
1942 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
1943 {
1944         u32 adv_reg, all_mask = 0;
1945
1946         if (mask & ADVERTISED_10baseT_Half)
1947                 all_mask |= ADVERTISE_10HALF;
1948         if (mask & ADVERTISED_10baseT_Full)
1949                 all_mask |= ADVERTISE_10FULL;
1950         if (mask & ADVERTISED_100baseT_Half)
1951                 all_mask |= ADVERTISE_100HALF;
1952         if (mask & ADVERTISED_100baseT_Full)
1953                 all_mask |= ADVERTISE_100FULL;
1954
1955         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1956                 return 0;
1957
1958         if ((adv_reg & all_mask) != all_mask)
1959                 return 0;
1960         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1961                 u32 tg3_ctrl;
1962
1963                 all_mask = 0;
1964                 if (mask & ADVERTISED_1000baseT_Half)
1965                         all_mask |= ADVERTISE_1000HALF;
1966                 if (mask & ADVERTISED_1000baseT_Full)
1967                         all_mask |= ADVERTISE_1000FULL;
1968
1969                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1970                         return 0;
1971
1972                 if ((tg3_ctrl & all_mask) != all_mask)
1973                         return 0;
1974         }
1975         return 1;
1976 }
1977
/* Bring up or re-evaluate the link on a copper PHY and program the MAC
 * to match the resulting state.  @force_reset forces a PHY reset before
 * link evaluation.  Always returns 0.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, dummy;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	tw32(MAC_EVENT, 0);

	/* Clear any latched MAC status bits before re-checking the link. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	tp->mi_mode = MAC_MI_MODE_BASE;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		/* BMSR link status is latched; the second read reflects
		 * the current state.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			/* Link is down on a 5401: reprogram the DSP and
			 * poll up to 10ms for the link to return.
			 */
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			/* 5401 B0 at gigabit may need a full reset plus a
			 * second DSP init if the link did not come back.
			 */
			if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, 0x1c, 0x8c68);
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);

	if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
		u32 val;

		/* Ensure bit 10 of the aux control shadow is set; if it
		 * was not, set it and go straight to relink.
		 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
		tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
		if (!(val & (1 << 10))) {
			val |= (1 << 10);
			tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
			goto relink;
		}
	}

	/* Poll for link (BMSR read twice per pass; status is latched). */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		/* Wait for a non-zero aux status to decode speed/duplex. */
		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Wait for BMCR to read back something sane. */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			if (bmcr & BMCR_ANENABLE) {
				current_link_up = 1;

				/* Force autoneg restart if we are exiting
				 * low power mode.
				 */
				if (!tg3_copper_is_advertising_all(tp,
						tp->link_config.advertising))
					current_link_up = 0;
			} else {
				current_link_up = 0;
			}
		} else {
			/* Forced mode: link counts as up only when the PHY
			 * state matches the requested speed and duplex.
			 */
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex) {
				current_link_up = 1;
			} else {
				current_link_up = 0;
			}
		}

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;
	}

	if (current_link_up == 1 &&
	    (tp->link_config.active_duplex == DUPLEX_FULL) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 local_adv, remote_adv;

		if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
			local_adv = 0;

		if (tg3_readphy(tp, MII_LPA, &remote_adv))
			remote_adv = 0;

		/* If we are not advertising what has been requested,
		 * bring the link down and reconfigure.
		 */
		if (local_adv !=
		    tg3_advert_flowctrl_1000T(tp->link_config.flowctrl)) {
			current_link_up = 0;
		} else {
			tg3_setup_flow_control(tp, local_adv, remote_adv);
		}
	}
relink:
	if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
		u32 tmp;

		/* Reprogram advertisements / restart autoneg, then see if
		 * the link came straight back.
		 */
		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &tmp);
		if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
		    (tmp & BMSR_LSTATUS))
			current_link_up = 1;
	}

	/* Program the MAC port mode/duplex to match the link state. */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if (current_link_up == 1 &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
	     (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		/* Notify firmware of the gigabit link via its mailbox. */
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Update carrier state and log any change. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
2252
/* Software state for the fiber (1000BASE-X) autonegotiation state
 * machine driven by tg3_fiber_aneg_smachine().  The MR_* flag names
 * follow the IEEE 802.3 clause 37 management variable naming.
 */
struct tg3_fiber_aneginfo {
	/* Current state of the autoneg state machine (ANEG_STATE_*). */
	int state;
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

	/* Control/status flags (MR_*); the MR_LP_ADV_* bits record what
	 * the link partner advertised.
	 */
	u32 flags;
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

	/* Tick counters: cur_time advances once per state-machine pass;
	 * link_time records when the link event of interest occurred.
	 */
	unsigned long link_time, cur_time;

	/* Last received config word and how many consecutive passes it
	 * has repeated (used to declare an ability match).
	 */
	u32 ability_match_cfg;
	int ability_match_count;

	/* Match detector outputs: ability / idle / acknowledge. */
	char ability_match, idle_match, ack_match;

	/* Transmitted and received link configuration words (ANEG_CFG_*
	 * bits within them).
	 */
	u32 txconfig, rxconfig;
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
/* Return codes of the state-machine step function. */
#define ANEG_OK         0
#define ANEG_DONE       1
#define ANEG_TIMER_ENAB 2
#define ANEG_FAILED     -1

/* Settle interval for the state machine, in cur_time ticks
 * (presumably compared against link_time/cur_time deltas by the state
 * machine — the consuming code is below this window; verify there).
 */
#define ANEG_STATE_SETTLE_TIME  10000
2316
/*
 * tg3_fiber_aneg_smachine - software 1000BASE-X autonegotiation state
 * machine for fiber ports, ticked repeatedly by fiber_autoneg().
 *
 * @tp: device state; MAC registers are accessed via tr32()/tw32().
 * @ap: per-negotiation bookkeeping: current state, rx/tx config words,
 *      ability/ack/idle match trackers and the tick-based timers.
 *
 * Each call samples the received config word (when MAC_STATUS_RCVD_CFG
 * is set), updates the match trackers, then advances ap->state through
 * the ANEG_STATE_* sequence, programming MAC_TX_AUTO_NEG and
 * MAC_MODE_SEND_CONFIGS as needed.
 *
 * Returns ANEG_OK to keep polling, ANEG_TIMER_ENAB while a settle
 * timer is running, ANEG_DONE when negotiation finished (result in
 * ap->flags), or ANEG_FAILED on an invalid negotiation.
 *
 * Fix vs. original: dropped the stray semicolon after the switch
 * statement's closing brace (a useless null statement).
 */
2317 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2318                                    struct tg3_fiber_aneginfo *ap)
2319 {
2320         unsigned long delta;
2321         u32 rx_cfg_reg;
2322         int ret;
2323
2324         if (ap->state == ANEG_STATE_UNKNOWN) {
2325                 ap->rxconfig = 0;
2326                 ap->link_time = 0;
2327                 ap->cur_time = 0;
2328                 ap->ability_match_cfg = 0;
2329                 ap->ability_match_count = 0;
2330                 ap->ability_match = 0;
2331                 ap->idle_match = 0;
2332                 ap->ack_match = 0;
2333         }
2334         ap->cur_time++;
2335
        /* Sample the partner's config word and update the match trackers. */
2336         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2337                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2338
2339                 if (rx_cfg_reg != ap->ability_match_cfg) {
2340                         ap->ability_match_cfg = rx_cfg_reg;
2341                         ap->ability_match = 0;
2342                         ap->ability_match_count = 0;
2343                 } else {
                        /* Same word seen on consecutive ticks => stable. */
2344                         if (++ap->ability_match_count > 1) {
2345                                 ap->ability_match = 1;
2346                                 ap->ability_match_cfg = rx_cfg_reg;
2347                         }
2348                 }
2349                 if (rx_cfg_reg & ANEG_CFG_ACK)
2350                         ap->ack_match = 1;
2351                 else
2352                         ap->ack_match = 0;
2353
2354                 ap->idle_match = 0;
2355         } else {
2356                 ap->idle_match = 1;
2357                 ap->ability_match_cfg = 0;
2358                 ap->ability_match_count = 0;
2359                 ap->ability_match = 0;
2360                 ap->ack_match = 0;
2361
2362                 rx_cfg_reg = 0;
2363         }
2364
2365         ap->rxconfig = rx_cfg_reg;
2366         ret = ANEG_OK;
2367
2368         switch(ap->state) {
2369         case ANEG_STATE_UNKNOWN:
2370                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2371                         ap->state = ANEG_STATE_AN_ENABLE;
2372
2373                 /* fallthru */
2374         case ANEG_STATE_AN_ENABLE:
2375                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2376                 if (ap->flags & MR_AN_ENABLE) {
2377                         ap->link_time = 0;
2378                         ap->cur_time = 0;
2379                         ap->ability_match_cfg = 0;
2380                         ap->ability_match_count = 0;
2381                         ap->ability_match = 0;
2382                         ap->idle_match = 0;
2383                         ap->ack_match = 0;
2384
2385                         ap->state = ANEG_STATE_RESTART_INIT;
2386                 } else {
2387                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
2388                 }
2389                 break;
2390
2391         case ANEG_STATE_RESTART_INIT:
2392                 ap->link_time = ap->cur_time;
2393                 ap->flags &= ~(MR_NP_LOADED);
2394                 ap->txconfig = 0;
2395                 tw32(MAC_TX_AUTO_NEG, 0);
2396                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2397                 tw32_f(MAC_MODE, tp->mac_mode);
2398                 udelay(40);
2399
2400                 ret = ANEG_TIMER_ENAB;
2401                 ap->state = ANEG_STATE_RESTART;
2402
2403                 /* fallthru */
2404         case ANEG_STATE_RESTART:
2405                 delta = ap->cur_time - ap->link_time;
2406                 if (delta > ANEG_STATE_SETTLE_TIME) {
2407                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2408                 } else {
2409                         ret = ANEG_TIMER_ENAB;
2410                 }
2411                 break;
2412
2413         case ANEG_STATE_DISABLE_LINK_OK:
2414                 ret = ANEG_DONE;
2415                 break;
2416
2417         case ANEG_STATE_ABILITY_DETECT_INIT:
2418                 ap->flags &= ~(MR_TOGGLE_TX);
2419                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2420                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2421                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2422                 tw32_f(MAC_MODE, tp->mac_mode);
2423                 udelay(40);
2424
2425                 ap->state = ANEG_STATE_ABILITY_DETECT;
2426                 break;
2427
2428         case ANEG_STATE_ABILITY_DETECT:
2429                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2430                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2431                 }
2432                 break;
2433
2434         case ANEG_STATE_ACK_DETECT_INIT:
2435                 ap->txconfig |= ANEG_CFG_ACK;
2436                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2437                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2438                 tw32_f(MAC_MODE, tp->mac_mode);
2439                 udelay(40);
2440
2441                 ap->state = ANEG_STATE_ACK_DETECT;
2442
2443                 /* fallthru */
2444         case ANEG_STATE_ACK_DETECT:
2445                 if (ap->ack_match != 0) {
2446                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2447                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2448                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2449                         } else {
2450                                 ap->state = ANEG_STATE_AN_ENABLE;
2451                         }
2452                 } else if (ap->ability_match != 0 &&
2453                            ap->rxconfig == 0) {
2454                         ap->state = ANEG_STATE_AN_ENABLE;
2455                 }
2456                 break;
2457
2458         case ANEG_STATE_COMPLETE_ACK_INIT:
2459                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2460                         ret = ANEG_FAILED;
2461                         break;
2462                 }
                /* Mirror the partner's advertisement into MR_LP_ADV_* flags. */
2463                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2464                                MR_LP_ADV_HALF_DUPLEX |
2465                                MR_LP_ADV_SYM_PAUSE |
2466                                MR_LP_ADV_ASYM_PAUSE |
2467                                MR_LP_ADV_REMOTE_FAULT1 |
2468                                MR_LP_ADV_REMOTE_FAULT2 |
2469                                MR_LP_ADV_NEXT_PAGE |
2470                                MR_TOGGLE_RX |
2471                                MR_NP_RX);
2472                 if (ap->rxconfig & ANEG_CFG_FD)
2473                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2474                 if (ap->rxconfig & ANEG_CFG_HD)
2475                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2476                 if (ap->rxconfig & ANEG_CFG_PS1)
2477                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2478                 if (ap->rxconfig & ANEG_CFG_PS2)
2479                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2480                 if (ap->rxconfig & ANEG_CFG_RF1)
2481                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2482                 if (ap->rxconfig & ANEG_CFG_RF2)
2483                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2484                 if (ap->rxconfig & ANEG_CFG_NP)
2485                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2486
2487                 ap->link_time = ap->cur_time;
2488
2489                 ap->flags ^= (MR_TOGGLE_TX);
                /* 0x0008: partner's toggle bit (tracked as MR_TOGGLE_RX) */
2490                 if (ap->rxconfig & 0x0008)
2491                         ap->flags |= MR_TOGGLE_RX;
2492                 if (ap->rxconfig & ANEG_CFG_NP)
2493                         ap->flags |= MR_NP_RX;
2494                 ap->flags |= MR_PAGE_RX;
2495
2496                 ap->state = ANEG_STATE_COMPLETE_ACK;
2497                 ret = ANEG_TIMER_ENAB;
2498                 break;
2499
2500         case ANEG_STATE_COMPLETE_ACK:
2501                 if (ap->ability_match != 0 &&
2502                     ap->rxconfig == 0) {
2503                         ap->state = ANEG_STATE_AN_ENABLE;
2504                         break;
2505                 }
2506                 delta = ap->cur_time - ap->link_time;
2507                 if (delta > ANEG_STATE_SETTLE_TIME) {
2508                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2509                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2510                         } else {
2511                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2512                                     !(ap->flags & MR_NP_RX)) {
2513                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2514                                 } else {
2515                                         ret = ANEG_FAILED;
2516                                 }
2517                         }
2518                 }
2519                 break;
2520
2521         case ANEG_STATE_IDLE_DETECT_INIT:
2522                 ap->link_time = ap->cur_time;
2523                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2524                 tw32_f(MAC_MODE, tp->mac_mode);
2525                 udelay(40);
2526
2527                 ap->state = ANEG_STATE_IDLE_DETECT;
2528                 ret = ANEG_TIMER_ENAB;
2529                 break;
2530
2531         case ANEG_STATE_IDLE_DETECT:
2532                 if (ap->ability_match != 0 &&
2533                     ap->rxconfig == 0) {
2534                         ap->state = ANEG_STATE_AN_ENABLE;
2535                         break;
2536                 }
2537                 delta = ap->cur_time - ap->link_time;
2538                 if (delta > ANEG_STATE_SETTLE_TIME) {
2539                         /* XXX another gem from the Broadcom driver :( */
2540                         ap->state = ANEG_STATE_LINK_OK;
2541                 }
2542                 break;
2543
2544         case ANEG_STATE_LINK_OK:
2545                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2546                 ret = ANEG_DONE;
2547                 break;
2548
2549         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2550                 /* ??? unimplemented */
2551                 break;
2552
2553         case ANEG_STATE_NEXT_PAGE_WAIT:
2554                 /* ??? unimplemented */
2555                 break;
2556
2557         default:
2558                 ret = ANEG_FAILED;
2559                 break;
2560         }
2561
2562         return ret;
2563 }
2564
/*
 * fiber_autoneg - run the software fiber autoneg state machine to
 * completion (or timeout) and report the resulting link flags.
 *
 * @tp:    device state
 * @flags: out - final MR_* flag word left by the state machine
 *
 * Clears the TX config word, forces GMII port mode and config-word
 * sending, then ticks tg3_fiber_aneg_smachine() with a 1us delay per
 * tick for up to 195000 ticks (~195ms busy-wait budget) until it
 * reports ANEG_DONE or ANEG_FAILED.
 *
 * Returns 1 when the machine finished (ANEG_DONE) and at least one of
 * MR_AN_COMPLETE / MR_LINK_OK / MR_LP_ADV_FULL_DUPLEX is set in the
 * result, 0 otherwise.
 */
2565 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2566 {
2567         int res = 0;
2568         struct tg3_fiber_aneginfo aninfo;
2569         int status = ANEG_FAILED;
2570         unsigned int tick;
2571         u32 tmp;
2572
2573         tw32_f(MAC_TX_AUTO_NEG, 0);
2574
2575         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2576         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2577         udelay(40);
2578
2579         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2580         udelay(40);
2581
2582         memset(&aninfo, 0, sizeof(aninfo));
2583         aninfo.flags |= MR_AN_ENABLE;
2584         aninfo.state = ANEG_STATE_UNKNOWN;
2585         aninfo.cur_time = 0;
2586         tick = 0;
        /* ~195ms ceiling: 195000 iterations x udelay(1) per tick */
2587         while (++tick < 195000) {
2588                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2589                 if (status == ANEG_DONE || status == ANEG_FAILED)
2590                         break;
2591
2592                 udelay(1);
2593         }
2594
        /* Stop sending config words regardless of outcome. */
2595         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2596         tw32_f(MAC_MODE, tp->mac_mode);
2597         udelay(40);
2598
2599         *flags = aninfo.flags;
2600
2601         if (status == ANEG_DONE &&
2602             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2603                              MR_LP_ADV_FULL_DUPLEX)))
2604                 res = 1;
2605
2606         return res;
2607 }
2608
/*
 * tg3_init_bcm8002 - reset and configure the BCM8002 SerDes PHY.
 *
 * Skips the sequence when the device has already completed init and
 * PCS sync is currently lost; otherwise performs the vendor register
 * sequence below (PLL lock range, soft reset, channel select, auto-
 * lock/comdet enable, POR pulse) and finishes by deselecting the
 * channel register so the PHY ID can be read afterwards.  Register
 * addresses/values here are opaque vendor magic; do not reorder.
 */
2609 static void tg3_init_bcm8002(struct tg3 *tp)
2610 {
2611         u32 mac_status = tr32(MAC_STATUS);
2612         int i;
2613
2614         /* Reset when initting first time or we have a link. */
2615         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2616             !(mac_status & MAC_STATUS_PCS_SYNCED))
2617                 return;
2618
2619         /* Set PLL lock range. */
2620         tg3_writephy(tp, 0x16, 0x8007);
2621
2622         /* SW reset */
2623         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2624
2625         /* Wait for reset to complete. */
2626         /* XXX schedule_timeout() ... */
2627         for (i = 0; i < 500; i++)
2628                 udelay(10);
2629
2630         /* Config mode; select PMA/Ch 1 regs. */
2631         tg3_writephy(tp, 0x10, 0x8411);
2632
2633         /* Enable auto-lock and comdet, select txclk for tx. */
2634         tg3_writephy(tp, 0x11, 0x0a10);
2635
2636         tg3_writephy(tp, 0x18, 0x00a0);
2637         tg3_writephy(tp, 0x16, 0x41ff);
2638
2639         /* Assert and deassert POR. */
2640         tg3_writephy(tp, 0x13, 0x0400);
2641         udelay(40);
2642         tg3_writephy(tp, 0x13, 0x0000);
2643
2644         tg3_writephy(tp, 0x11, 0x0a50);
2645         udelay(40);
2646         tg3_writephy(tp, 0x11, 0x0a10);
2647
2648         /* Wait for signal to stabilize */
2649         /* XXX schedule_timeout() ... */
2650         for (i = 0; i < 15000; i++)
2651                 udelay(10);
2652
2653         /* Deselect the channel register so we can read the PHYID
2654          * later.
2655          */
2656         tg3_writephy(tp, 0x10, 0x8011);
2657 }
2658
/*
 * tg3_setup_fiber_hw_autoneg - link setup via the hardware SG_DIG
 * autonegotiation block.
 *
 * @tp:         device state
 * @mac_status: MAC_STATUS snapshot taken by the caller
 *
 * Forced mode (autoneg disabled): tears down any active HW autoneg
 * (with the MAC_SERDES_CFG workaround) and declares link up when PCS
 * is synced, with flow control disabled.
 *
 * Autoneg mode: builds the expected SG_DIG_CTRL value from the pause
 * bits advertised via tg3_advert_flowctrl_1000X(), (re)starts HW
 * autoneg when the register differs, and on completion resolves flow
 * control from local (sg_dig_ctrl) and remote (SG_DIG_STATUS) pause
 * bits.  When autoneg does not complete before serdes_counter runs
 * out, falls back to parallel detection: link is up only with PCS
 * sync and no incoming config code words.
 *
 * The serdes_cfg workaround is applied on all chip revs except
 * 5704 A0/A1; 0xc010000 vs 0x4010000 select port A vs port B
 * (vendor magic values - NOTE(review): unverifiable from here).
 *
 * Returns 1 when the link is up, 0 otherwise.
 */
2659 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2660 {
2661         u16 flowctrl;
2662         u32 sg_dig_ctrl, sg_dig_status;
2663         u32 serdes_cfg, expected_sg_dig_ctrl;
2664         int workaround, port_a;
2665         int current_link_up;
2666
2667         serdes_cfg = 0;
2668         expected_sg_dig_ctrl = 0;
2669         workaround = 0;
2670         port_a = 1;
2671         current_link_up = 0;
2672
2673         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2674             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2675                 workaround = 1;
2676                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2677                         port_a = 0;
2678
2679                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2680                 /* preserve bits 20-23 for voltage regulator */
2681                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2682         }
2683
2684         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2685
2686         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2687                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
2688                         if (workaround) {
2689                                 u32 val = serdes_cfg;
2690
2691                                 if (port_a)
2692                                         val |= 0xc010000;
2693                                 else
2694                                         val |= 0x4010000;
2695                                 tw32_f(MAC_SERDES_CFG, val);
2696                         }
2697
2698                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
2699                 }
2700                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2701                         tg3_setup_flow_control(tp, 0, 0);
2702                         current_link_up = 1;
2703                 }
2704                 goto out;
2705         }
2706
2707         /* Want auto-negotiation.  */
2708         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
2709
2710         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
2711         if (flowctrl & ADVERTISE_1000XPAUSE)
2712                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
2713         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
2714                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
2715
2716         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2717                 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
2718                     tp->serdes_counter &&
2719                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
2720                                     MAC_STATUS_RCVD_CFG)) ==
2721                      MAC_STATUS_PCS_SYNCED)) {
2722                         tp->serdes_counter--;
2723                         current_link_up = 1;
2724                         goto out;
2725                 }
        /* Pulse SG_DIG_SOFT_RESET with the desired ctrl word to restart. */
2726 restart_autoneg:
2727                 if (workaround)
2728                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2729                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
2730                 udelay(5);
2731                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2732
2733                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
2734                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2735         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2736                                  MAC_STATUS_SIGNAL_DET)) {
2737                 sg_dig_status = tr32(SG_DIG_STATUS);
2738                 mac_status = tr32(MAC_STATUS);
2739
2740                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
2741                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2742                         u32 local_adv = 0, remote_adv = 0;
2743
2744                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
2745                                 local_adv |= ADVERTISE_1000XPAUSE;
2746                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
2747                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
2748
2749                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
2750                                 remote_adv |= LPA_1000XPAUSE;
2751                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
2752                                 remote_adv |= LPA_1000XPAUSE_ASYM;
2753
2754                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2755                         current_link_up = 1;
2756                         tp->serdes_counter = 0;
2757                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2758                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
2759                         if (tp->serdes_counter)
2760                                 tp->serdes_counter--;
2761                         else {
2762                                 if (workaround) {
2763                                         u32 val = serdes_cfg;
2764
2765                                         if (port_a)
2766                                                 val |= 0xc010000;
2767                                         else
2768                                                 val |= 0x4010000;
2769
2770                                         tw32_f(MAC_SERDES_CFG, val);
2771                                 }
2772
2773                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
2774                                 udelay(40);
2775
2776                                 /* Link parallel detection - link is up */
2777                                 /* only if we have PCS_SYNC and not */
2778                                 /* receiving config code words */
2779                                 mac_status = tr32(MAC_STATUS);
2780                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2781                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2782                                         tg3_setup_flow_control(tp, 0, 0);
2783                                         current_link_up = 1;
2784                                         tp->tg3_flags2 |=
2785                                                 TG3_FLG2_PARALLEL_DETECT;
2786                                         tp->serdes_counter =
2787                                                 SERDES_PARALLEL_DET_TIMEOUT;
2788                                 } else
2789                                         goto restart_autoneg;
2790                         }
2791                 }
2792         } else {
2793                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
2794                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2795         }
2796
2797 out:
2798         return current_link_up;
2799 }
2800
/*
 * tg3_setup_fiber_by_hand - bring up a fiber link without the hardware
 * autoneg block.
 *
 * @tp:         device state
 * @mac_status: MAC_STATUS snapshot taken by the caller
 *
 * Requires PCS sync; bails out immediately otherwise.  With autoneg
 * enabled it runs the software state machine via fiber_autoneg(),
 * derives pause settings from the partner's MR_LP_ADV_* flags (local
 * advertisement is fixed at ADVERTISE_PAUSE_CAP), then polls up to 30
 * times for the SYNC/CFG change bits to settle.  Even if
 * fiber_autoneg() failed, the link is still declared up when PCS is
 * synced and no config code words are being received.  With autoneg
 * disabled, a 1000FD link is simply forced up.
 *
 * Returns 1 when the link is up, 0 otherwise.
 */
2801 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2802 {
2803         int current_link_up = 0;
2804
2805         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
2806                 goto out;
2807
2808         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2809                 u32 flags;
2810                 int i;
2811
2812                 if (fiber_autoneg(tp, &flags)) {
2813                         u32 local_adv, remote_adv;
2814
2815                         local_adv = ADVERTISE_PAUSE_CAP;
2816                         remote_adv = 0;
2817                         if (flags & MR_LP_ADV_SYM_PAUSE)
2818                                 remote_adv |= LPA_PAUSE_CAP;
2819                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2820                                 remote_adv |= LPA_PAUSE_ASYM;
2821
2822                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2823
2824                         current_link_up = 1;
2825                 }
                /* Ack latched SYNC/CFG change bits until they stay clear. */
2826                 for (i = 0; i < 30; i++) {
2827                         udelay(20);
2828                         tw32_f(MAC_STATUS,
2829                                (MAC_STATUS_SYNC_CHANGED |
2830                                 MAC_STATUS_CFG_CHANGED));
2831                         udelay(40);
2832                         if ((tr32(MAC_STATUS) &
2833                              (MAC_STATUS_SYNC_CHANGED |
2834                               MAC_STATUS_CFG_CHANGED)) == 0)
2835                                 break;
2836                 }
2837
2838                 mac_status = tr32(MAC_STATUS);
2839                 if (current_link_up == 0 &&
2840                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2841                     !(mac_status & MAC_STATUS_RCVD_CFG))
2842                         current_link_up = 1;
2843         } else {
2844                 /* Forcing 1000FD link up. */
2845                 current_link_up = 1;
2846
2847                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2848                 udelay(40);
2849
2850                 tw32_f(MAC_MODE, tp->mac_mode);
2851                 udelay(40);
2852         }
2853
2854 out:
2855         return current_link_up;
2856 }
2857
/*
 * tg3_setup_fiber_phy - top-level link setup for TBI/fiber ports.
 *
 * @tp:          device state
 * @force_reset: unused in this path (kept for signature parity with
 *               the other tg3_setup_*_phy routines)
 *
 * Short-circuits when nothing changed: no HW autoneg, carrier already
 * on, init complete, and MAC_STATUS shows exactly PCS sync + signal
 * detect with no pending config/change bits.  Otherwise it forces TBI
 * port mode, re-inits a BCM8002 PHY if present, enables link-change
 * events, and delegates to tg3_setup_fiber_hw_autoneg() or
 * tg3_setup_fiber_by_hand().  It then clears the latched status bits,
 * kicks autoneg by pulsing MAC_MODE_SEND_CONFIGS if sync was lost,
 * records active speed/duplex (always 1000/full on link up), drives
 * the link LED, updates netif carrier state, and reports the link via
 * tg3_link_report() when anything user-visible changed.
 *
 * Always returns 0.
 */
2858 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2859 {
2860         u32 orig_pause_cfg;
2861         u16 orig_active_speed;
2862         u8 orig_active_duplex;
2863         u32 mac_status;
2864         int current_link_up;
2865         int i;
2866
2867         orig_pause_cfg = tp->link_config.active_flowctrl;
2868         orig_active_speed = tp->link_config.active_speed;
2869         orig_active_duplex = tp->link_config.active_duplex;
2870
2871         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2872             netif_carrier_ok(tp->dev) &&
2873             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2874                 mac_status = tr32(MAC_STATUS);
2875                 mac_status &= (MAC_STATUS_PCS_SYNCED |
2876                                MAC_STATUS_SIGNAL_DET |
2877                                MAC_STATUS_CFG_CHANGED |
2878                                MAC_STATUS_RCVD_CFG);
2879                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2880                                    MAC_STATUS_SIGNAL_DET)) {
2881                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2882                                             MAC_STATUS_CFG_CHANGED));
2883                         return 0;
2884                 }
2885         }
2886
2887         tw32_f(MAC_TX_AUTO_NEG, 0);
2888
2889         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2890         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2891         tw32_f(MAC_MODE, tp->mac_mode);
2892         udelay(40);
2893
2894         if (tp->phy_id == PHY_ID_BCM8002)
2895                 tg3_init_bcm8002(tp);
2896
2897         /* Enable link change event even when serdes polling.  */
2898         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2899         udelay(40);
2900
2901         current_link_up = 0;
2902         mac_status = tr32(MAC_STATUS);
2903
2904         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2905                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2906         else
2907                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2908
2909         tp->hw_status->status =
2910                 (SD_STATUS_UPDATED |
2911                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2912
        /* Ack latched change bits until they stay clear (max 100 tries). */
2913         for (i = 0; i < 100; i++) {
2914                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2915                                     MAC_STATUS_CFG_CHANGED));
2916                 udelay(5);
2917                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2918                                          MAC_STATUS_CFG_CHANGED |
2919                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
2920                         break;
2921         }
2922
2923         mac_status = tr32(MAC_STATUS);
2924         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2925                 current_link_up = 0;
2926                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2927                     tp->serdes_counter == 0) {
2928                         tw32_f(MAC_MODE, (tp->mac_mode |
2929                                           MAC_MODE_SEND_CONFIGS));
2930                         udelay(1);
2931                         tw32_f(MAC_MODE, tp->mac_mode);
2932                 }
2933         }
2934
2935         if (current_link_up == 1) {
2936                 tp->link_config.active_speed = SPEED_1000;
2937                 tp->link_config.active_duplex = DUPLEX_FULL;
2938                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2939                                     LED_CTRL_LNKLED_OVERRIDE |
2940                                     LED_CTRL_1000MBPS_ON));
2941         } else {
2942                 tp->link_config.active_speed = SPEED_INVALID;
2943                 tp->link_config.active_duplex = DUPLEX_INVALID;
2944                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2945                                     LED_CTRL_LNKLED_OVERRIDE |
2946                                     LED_CTRL_TRAFFIC_OVERRIDE));
2947         }
2948
2949         if (current_link_up != netif_carrier_ok(tp->dev)) {
2950                 if (current_link_up)
2951                         netif_carrier_on(tp->dev);
2952                 else
2953                         netif_carrier_off(tp->dev);
2954                 tg3_link_report(tp);
2955         } else {
2956                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
2957                 if (orig_pause_cfg != now_pause_cfg ||
2958                     orig_active_speed != tp->link_config.active_speed ||
2959                     orig_active_duplex != tp->link_config.active_duplex)
2960                         tg3_link_report(tp);
2961         }
2962
2963         return 0;
2964 }
2965
2966 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2967 {
2968         int current_link_up, err = 0;
2969         u32 bmsr, bmcr;
2970         u16 current_speed;
2971         u8 current_duplex;
2972
2973         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2974         tw32_f(MAC_MODE, tp->mac_mode);
2975         udelay(40);
2976
2977         tw32(MAC_EVENT, 0);
2978
2979         tw32_f(MAC_STATUS,
2980              (MAC_STATUS_SYNC_CHANGED |
2981               MAC_STATUS_CFG_CHANGED |
2982               MAC_STATUS_MI_COMPLETION |
2983               MAC_STATUS_LNKSTATE_CHANGED));
2984         udelay(40);
2985
2986         if (force_reset)
2987                 tg3_phy_reset(tp);
2988
2989         current_link_up = 0;
2990         current_speed = SPEED_INVALID;
2991         current_duplex = DUPLEX_INVALID;
2992
2993         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2994         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2995         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2996                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2997                         bmsr |= BMSR_LSTATUS;
2998                 else
2999                         bmsr &= ~BMSR_LSTATUS;
3000         }
3001
3002         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
3003
3004         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
3005             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
3006                 /* do nothing, just check for link up at the end */
3007         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3008                 u32 adv, new_adv;
3009
3010                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3011                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
3012                                   ADVERTISE_1000XPAUSE |
3013                                   ADVERTISE_1000XPSE_ASYM |
3014                                   ADVERTISE_SLCT);
3015
3016                 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3017
3018                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
3019                         new_adv |= ADVERTISE_1000XHALF;
3020                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
3021                         new_adv |= ADVERTISE_1000XFULL;
3022
3023                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
3024                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
3025                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
3026                         tg3_writephy(tp, MII_BMCR, bmcr);
3027
3028                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3029                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
3030                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3031
3032                         return err;
3033                 }
3034         } else {
3035                 u32 new_bmcr;
3036
3037                 bmcr &= ~BMCR_SPEED1000;
3038                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
3039
3040                 if (tp->link_config.duplex == DUPLEX_FULL)
3041                         new_bmcr |= BMCR_FULLDPLX;
3042
3043                 if (new_bmcr != bmcr) {
3044                         /* BMCR_SPEED1000 is a reserved bit that needs
3045                          * to be set on write.
3046                          */
3047                         new_bmcr |= BMCR_SPEED1000;
3048
3049                         /* Force a linkdown */
3050                         if (netif_carrier_ok(tp->dev)) {
3051                                 u32 adv;
3052
3053                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3054                                 adv &= ~(ADVERTISE_1000XFULL |
3055                                          ADVERTISE_1000XHALF |
3056                                          ADVERTISE_SLCT);
3057                                 tg3_writephy(tp, MII_ADVERTISE, adv);
3058                                 tg3_writephy(tp, MII_BMCR, bmcr |
3059                                                            BMCR_ANRESTART |
3060                                                            BMCR_ANENABLE);
3061                                 udelay(10);
3062                                 netif_carrier_off(tp->dev);
3063                         }
3064                         tg3_writephy(tp, MII_BMCR, new_bmcr);
3065                         bmcr = new_bmcr;
3066                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3067                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3068                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3069                             ASIC_REV_5714) {
3070                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3071                                         bmsr |= BMSR_LSTATUS;
3072                                 else
3073                                         bmsr &= ~BMSR_LSTATUS;
3074                         }
3075                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3076                 }
3077         }
3078
3079         if (bmsr & BMSR_LSTATUS) {
3080                 current_speed = SPEED_1000;
3081                 current_link_up = 1;
3082                 if (bmcr & BMCR_FULLDPLX)
3083                         current_duplex = DUPLEX_FULL;
3084                 else
3085                         current_duplex = DUPLEX_HALF;
3086
3087                 if (bmcr & BMCR_ANENABLE) {
3088                         u32 local_adv, remote_adv, common;
3089
3090                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
3091                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
3092                         common = local_adv & remote_adv;
3093                         if (common & (ADVERTISE_1000XHALF |
3094                                       ADVERTISE_1000XFULL)) {
3095                                 if (common & ADVERTISE_1000XFULL)
3096                                         current_duplex = DUPLEX_FULL;
3097                                 else
3098                                         current_duplex = DUPLEX_HALF;
3099
3100                                 tg3_setup_flow_control(tp, local_adv,
3101                                                        remote_adv);
3102                         }
3103                         else
3104                                 current_link_up = 0;
3105                 }
3106         }
3107
3108         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3109         if (tp->link_config.active_duplex == DUPLEX_HALF)
3110                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3111
3112         tw32_f(MAC_MODE, tp->mac_mode);
3113         udelay(40);
3114
3115         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3116
3117         tp->link_config.active_speed = current_speed;
3118         tp->link_config.active_duplex = current_duplex;
3119
3120         if (current_link_up != netif_carrier_ok(tp->dev)) {
3121                 if (current_link_up)
3122                         netif_carrier_on(tp->dev);
3123                 else {
3124                         netif_carrier_off(tp->dev);
3125                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3126                 }
3127                 tg3_link_report(tp);
3128         }
3129         return err;
3130 }
3131
/* Poll-based parallel-detection handling for serdes links.
 *
 * Called periodically.  While tp->serdes_counter is non-zero we simply
 * count down to give autonegotiation time to complete.  After that:
 *  - if there is no carrier and autoneg is enabled: probe the PHY
 *    shadow/expansion registers; if signal detect is set (phy1 bit 4)
 *    while no config code words are being received (phy2 bit 5 clear),
 *    the peer is not autonegotiating — force 1000/full with autoneg
 *    off and set TG3_FLG2_PARALLEL_DETECT;
 *  - if the link came up via parallel detection and config code words
 *    start arriving again: re-enable autoneg and clear the flag.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}
	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, 0x1c, 0x7c00);
			tg3_readphy(tp, 0x1c, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, 0x17, 0x0f01);
			/* Read twice; presumably the first read clears a
			 * latched status — TODO confirm with PHY datasheet.
			 */
			tg3_readphy(tp, 0x15, &phy2);
			tg3_readphy(tp, 0x15, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
			}
		}
	}
	else if (netif_carrier_ok(tp->dev) &&
		 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, 0x17, 0x0f01);
		tg3_readphy(tp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

		}
	}
}
3189
/* Configure the link according to tp->link_config.
 *
 * Dispatches to the fiber, fiber-MII, or copper PHY setup routine
 * based on tg3_flags2, then applies chip fixups that depend on the
 * resulting link state:
 *  - 5784 A0/A1: reprogram the GRC clock prescaler from the current
 *    MAC clock rate;
 *  - 1000/half-duplex: program a larger slot time in MAC_TX_LENGTHS;
 *  - pre-5705 chips: only coalesce statistics while carrier is up;
 *  - ASPM workaround: lower the PCIe L1 entry threshold while the
 *    link is down.
 *
 * Returns the error code from the PHY setup routine (0 on success).
 */
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	int err;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		err = tg3_setup_fiber_phy(tp, force_reset);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	} else {
		err = tg3_setup_copper_phy(tp, force_reset);
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
		u32 val, scale;

		/* Derive the prescaler value from the MAC clock rate. */
		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	/* Gigabit half-duplex uses slot time 0xff instead of 32. */
	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		/* Only gather statistics while the link is up. */
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
		u32 val = tr32(PCIE_PWR_MGMT_THRESH);
		/* Use the relaxed L1 threshold only while link is down. */
		if (!netif_carrier_ok(tp->dev))
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
3252
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* If the write-reorder workaround is already active, or indirect
	 * mailbox writes are in use, this recovery cannot help — treat
	 * that as a fatal driver bug.
	 */
	BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
	       "mapped I/O cycles to the network device, attempting to "
	       "recover. Please report the problem to the driver maintainer "
	       "and include system chipset information.\n", tp->dev->name);

	/* Flag the pending recovery under tp->lock; the reset task
	 * performs the actual chip reset.
	 */
	spin_lock(&tp->lock);
	tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
	spin_unlock(&tp->lock);
}
3273
3274 static inline u32 tg3_tx_avail(struct tg3 *tp)
3275 {
3276         smp_mb();
3277         return (tp->tx_pending -
3278                 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3279 }
3280
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3 *tp)
{
	u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tp->tx_cons;

	/* Reclaim every descriptor the chip has consumed. */
	while (sw_idx != hw_idx) {
		struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A NULL skb means the completion index is bogus (likely
		 * MMIO write re-ordering) — trigger recovery.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		/* First descriptor maps the linear part of the skb. */
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		/* Following descriptors map the paged fragments. */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tp->tx_buffers[sw_idx];
			/* Note inconsistencies, but keep unmapping so we
			 * don't leak DMA mappings before recovering.
			 */
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(ri, mapping),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);

			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tp->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Re-check under the tx lock to avoid racing a concurrent
	 * queue stop in tg3_start_xmit().
	 */
	if (unlikely(netif_queue_stopped(tp->dev) &&
		     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
		netif_tx_lock(tp->dev);
		if (netif_queue_stopped(tp->dev) &&
		    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
			netif_wake_queue(tp->dev);
		netif_tx_unlock(tp->dev);
	}
}
3348
3349 /* Returns size of skb allocated or < 0 on error.
3350  *
3351  * We only need to fill in the address because the other members
3352  * of the RX descriptor are invariant, see tg3_init_rings.
3353  *
3354  * Note the purposeful assymetry of cpu vs. chip accesses.  For
3355  * posting buffers we only dirty the first cache line of the RX
3356  * descriptor (containing the address).  Whereas for the RX status
3357  * buffers the cpu only reads the last cacheline of the RX descriptor
3358  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3359  */
3360 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3361                             int src_idx, u32 dest_idx_unmasked)
3362 {
3363         struct tg3_rx_buffer_desc *desc;
3364         struct ring_info *map, *src_map;
3365         struct sk_buff *skb;
3366         dma_addr_t mapping;
3367         int skb_size, dest_idx;
3368
3369         src_map = NULL;
3370         switch (opaque_key) {
3371         case RXD_OPAQUE_RING_STD:
3372                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3373                 desc = &tp->rx_std[dest_idx];
3374                 map = &tp->rx_std_buffers[dest_idx];
3375                 if (src_idx >= 0)
3376                         src_map = &tp->rx_std_buffers[src_idx];
3377                 skb_size = tp->rx_pkt_buf_sz;
3378                 break;
3379
3380         case RXD_OPAQUE_RING_JUMBO:
3381                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3382                 desc = &tp->rx_jumbo[dest_idx];
3383                 map = &tp->rx_jumbo_buffers[dest_idx];
3384                 if (src_idx >= 0)
3385                         src_map = &tp->rx_jumbo_buffers[src_idx];
3386                 skb_size = RX_JUMBO_PKT_BUF_SZ;
3387                 break;
3388
3389         default:
3390                 return -EINVAL;
3391         };
3392
3393         /* Do not overwrite any of the map or rp information
3394          * until we are sure we can commit to a new buffer.
3395          *
3396          * Callers depend upon this behavior and assume that
3397          * we leave everything unchanged if we fail.
3398          */
3399         skb = netdev_alloc_skb(tp->dev, skb_size);
3400         if (skb == NULL)
3401                 return -ENOMEM;
3402
3403         skb_reserve(skb, tp->rx_offset);
3404
3405         mapping = pci_map_single(tp->pdev, skb->data,
3406                                  skb_size - tp->rx_offset,
3407                                  PCI_DMA_FROMDEVICE);
3408
3409         map->skb = skb;
3410         pci_unmap_addr_set(map, mapping, mapping);
3411
3412         if (src_map != NULL)
3413                 src_map->skb = NULL;
3414
3415         desc->addr_hi = ((u64)mapping >> 32);
3416         desc->addr_lo = ((u64)mapping & 0xffffffff);
3417
3418         return skb_size;
3419 }
3420
3421 /* We only need to move over in the address because the other
3422  * members of the RX descriptor are invariant.  See notes above
3423  * tg3_alloc_rx_skb for full details.
3424  */
3425 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3426                            int src_idx, u32 dest_idx_unmasked)
3427 {
3428         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3429         struct ring_info *src_map, *dest_map;
3430         int dest_idx;
3431
3432         switch (opaque_key) {
3433         case RXD_OPAQUE_RING_STD:
3434                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3435                 dest_desc = &tp->rx_std[dest_idx];
3436                 dest_map = &tp->rx_std_buffers[dest_idx];
3437                 src_desc = &tp->rx_std[src_idx];
3438                 src_map = &tp->rx_std_buffers[src_idx];
3439                 break;
3440
3441         case RXD_OPAQUE_RING_JUMBO:
3442                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3443                 dest_desc = &tp->rx_jumbo[dest_idx];
3444                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3445                 src_desc = &tp->rx_jumbo[src_idx];
3446                 src_map = &tp->rx_jumbo_buffers[src_idx];
3447                 break;
3448
3449         default:
3450                 return;
3451         };
3452
3453         dest_map->skb = src_map->skb;
3454         pci_unmap_addr_set(dest_map, mapping,
3455                            pci_unmap_addr(src_map, mapping));
3456         dest_desc->addr_hi = src_desc->addr_hi;
3457         dest_desc->addr_lo = src_desc->addr_lo;
3458
3459         src_map->skb = NULL;
3460 }
3461
#if TG3_VLAN_TAG_USED
/* Deliver a VLAN-tagged frame to the stack via the hw-accel path. */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
	return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
#endif
3468
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
static int tg3_rx(struct tg3 *tp, int budget)
{
	u32 work_mask, rx_std_posted = 0;
	u32 sw_idx = tp->rx_rcb_ptr;
	u16 hw_idx;
	int received;

	hw_idx = tp->hw_status->idx[0].rx_producer;
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	while (sw_idx != hw_idx && budget > 0) {
		struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		/* The opaque cookie identifies the posting ring and the
		 * slot within it that this buffer came from.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
						  mapping);
			skb = tp->rx_std_buffers[desc_idx].skb;
			post_ptr = &tp->rx_std_ptr;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
						  mapping);
			skb = tp->rx_jumbo_buffers[desc_idx].skb;
			post_ptr = &tp->rx_jumbo_ptr;
		}
		else {
			/* Unknown ring: skip this status entry entirely. */
			goto next_pkt_nopost;
		}

		work_mask |= opaque_key;

		/* Drop errored frames (except the tolerated odd-nibble
		 * MII case), recycling the buffer back to its ring.
		 */
		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->net_stats.rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */

		if (len > RX_COPY_THRESHOLD
			&& tp->rx_offset == 2
			/* rx_offset != 2 iff this is a 5701 card running
			 * in PCI-X mode [see tg3_get_invariants()] */
		) {
			int skb_size;

			/* Large packet: post a replacement buffer and pass
			 * the original skb up the stack without copying.
			 */
			skb_size = tg3_alloc_rx_skb(tp, opaque_key,
						    desc_idx, *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr,
					 skb_size - tp->rx_offset,
					 PCI_DMA_FROMDEVICE);

			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;

			/* Small packet: copy into a fresh skb and recycle
			 * the original ring buffer in place.
			 */
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = netdev_alloc_skb(tp->dev, len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			/* 2-byte reserve — presumably for IP header
			 * alignment; matches the len + 2 allocation.
			 */
			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		/* Accept the hardware checksum only when the chip flags a
		 * computed TCP/UDP checksum equal to 0xffff.
		 */
		if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb->protocol = eth_type_trans(skb, tp->dev);
#if TG3_VLAN_TAG_USED
		if (tp->vlgrp != NULL &&
		    desc->type_flags & RXD_FLAG_VLAN) {
			tg3_vlan_rx(tp, skb,
				    desc->err_vlan & RXD_VLAN_MASK);
		} else
#endif
			netif_receive_skb(skb);

		tp->dev->last_rx = jiffies;
		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Periodically publish newly posted std-ring buffers to
		 * the chip so it does not run dry during long bursts.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			u32 idx = *post_ptr % TG3_RX_RING_SIZE;

			tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
				     TG3_64BIT_REG_LOW, idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = tp->hw_status->idx[0].rx_producer;
			rmb();
		}
	}

	/* ACK the status ring. */
	tp->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);

	/* Refill RX ring(s). */
	if (work_mask & RXD_OPAQUE_RING_STD) {
		sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	if (work_mask & RXD_OPAQUE_RING_JUMBO) {
		sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	/* Order the mailbox writes with subsequent MMIO. */
	mmiowb();

	return received;
}
3648
/* One NAPI work pass: handle link-change events, reap TX completions,
 * then receive up to (budget - work_done) packets.
 *
 * Returns the updated work_done count.  Returns early (without RX)
 * when tg3_tx() flagged a pending TX recovery.
 */
static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
{
	struct tg3_hw_status *sblk = tp->hw_status;

	/* handle link change and other phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Ack the link-change bit before reconfiguring. */
			sblk->status = SD_STATUS_UPDATED |
				(sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}

	/* run TX completion thread */
	if (sblk->idx[0].tx_consumer != tp->tx_cons) {
		tg3_tx(tp);
		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			return work_done;
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
		work_done += tg3_rx(tp, budget - work_done);

	return work_done;
}
3682
/* NAPI poll callback: repeat work passes until either the budget is
 * exhausted or no work remains, then complete NAPI and re-enable
 * interrupts via tg3_restart_ints().  A pending TX recovery completes
 * NAPI immediately and schedules the reset task instead.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3 *tp = container_of(napi, struct tg3, napi);
	int work_done = 0;
	struct tg3_hw_status *sblk = tp->hw_status;

	while (1) {
		work_done = tg3_poll_work(tp, work_done, budget);

		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
			/* tp->last_tag is used in tg3_restart_ints() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tp->last_tag = sblk->status_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tp))) {
			netif_rx_complete(tp->dev, napi);
			tg3_restart_ints(tp);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	netif_rx_complete(tp->dev, napi);
	schedule_work(&tp->reset_task);
	return work_done;
}
3723
/* Mark the device as IRQ-synchronizing and wait for any in-flight
 * interrupt handler to finish.  Must not be called while a previous
 * quiesce is still active (BUG_ON guards that).
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	/* Make irq_sync visible before waiting for the handler. */
	smp_mb();

	synchronize_irq(tp->pdev->irq);
}
3733
/* Non-zero while tg3_irq_quiesce() has IRQ processing disabled;
 * interrupt handlers check this before scheduling NAPI.
 */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
3738
3739 /* Fully shutdown all tg3 driver activity elsewhere in the system.
3740  * If irq_sync is non-zero, then the IRQ handler must be synchronized
3741  * with as well.  Most of the time, this is not necessary except when
3742  * shutting down the device.
3743  */
3744 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3745 {
3746         spin_lock_bh(&tp->lock);
3747         if (irq_sync)
3748                 tg3_irq_quiesce(tp);
3749 }
3750
/* Release the lock taken by tg3_full_lock(). */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
3755
3756 /* One-shot MSI handler - Chip automatically disables interrupt
3757  * after sending MSI so driver doesn't have to do it.
3758  */
3759 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
3760 {
3761         struct net_device *dev = dev_id;
3762         struct tg3 *tp = netdev_priv(dev);
3763
3764         prefetch(tp->hw_status);
3765         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3766
3767         if (likely(!tg3_irq_sync(tp)))
3768                 netif_rx_schedule(dev, &tp->napi);
3769
3770         return IRQ_HANDLED;
3771 }
3772
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	/* Skip NAPI scheduling while tg3_irq_quiesce() is active. */
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev, &tp->napi);

	return IRQ_RETVAL(1);
}
3797
/* Legacy INTx interrupt handler (non-tagged status block). */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			/* Not our interrupt (or chip is resetting). */
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tp))) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		netif_rx_schedule(dev, &tp->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
3846
/* INTx interrupt handler for chips using tagged status blocks.  A
 * status block whose tag equals tp->last_tag has already been seen, so
 * the tag comparison replaces the SD_STATUS_UPDATED test used by
 * tg3_interrupt().
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct tg3 *tp = netdev_priv(dev);
        struct tg3_hw_status *sblk = tp->hw_status;
        unsigned int handled = 1;

        /* In INTx mode, it is possible for the interrupt to arrive at
         * the CPU before the status block posted prior to the interrupt.
         * Reading the PCI State register will confirm whether the
         * interrupt is ours and will flush the status block.
         */
        if (unlikely(sblk->status_tag == tp->last_tag)) {
                if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
                    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
                        handled = 0;
                        goto out;
                }
        }

        /*
         * writing any value to intr-mbox-0 clears PCI INTA# and
         * chip-internal interrupt pending events.
         * writing non-zero to intr-mbox-0 additional tells the
         * NIC to stop sending us irqs, engaging "in-intr-handler"
         * event coalescing.
         *
         * Flush the mailbox to de-assert the IRQ immediately to prevent
         * spurious interrupts.  The flush impacts performance but
         * excessive spurious interrupts can be worse in some cases.
         */
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
        if (tg3_irq_sync(tp))
                goto out;
        if (netif_rx_schedule_prep(dev, &tp->napi)) {
                prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
                /* Update last_tag to mark that this status has been
                 * seen. Because interrupt may be shared, we may be
                 * racing with tg3_poll(), so only update last_tag
                 * if tg3_poll() is not scheduled.
                 */
                tp->last_tag = sblk->status_tag;
                __netif_rx_schedule(dev, &tp->napi);
        }
out:
        return IRQ_RETVAL(handled);
}
3894
3895 /* ISR for interrupt test */
3896 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
3897 {
3898         struct net_device *dev = dev_id;
3899         struct tg3 *tp = netdev_priv(dev);
3900         struct tg3_hw_status *sblk = tp->hw_status;
3901
3902         if ((sblk->status & SD_STATUS_UPDATED) ||
3903             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3904                 tg3_disable_ints(tp);
3905                 return IRQ_RETVAL(1);
3906         }
3907         return IRQ_RETVAL(0);
3908 }
3909
/* Forward declarations; definitions appear later in this file.  Needed
 * here by tg3_restart_hw() and tg3_reset_task() below.
 */
static int tg3_init_hw(struct tg3 *, int);
static int tg3_halt(struct tg3 *, int, int);
3912
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 *
 * Returns 0 on success or the error from tg3_init_hw(); on failure the
 * device is halted and closed.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
{
        int err;

        err = tg3_init_hw(tp, reset_phy);
        if (err) {
                printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
                       "aborting.\n", tp->dev->name);
                /* Init failed: shut the chip down and close the device.
                 * dev_close() cannot run under the full lock, so drop it
                 * around the close and re-acquire before returning to
                 * keep the caller's locking assumptions intact.
                 */
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                tg3_full_unlock(tp);
                del_timer_sync(&tp->timer);
                tp->irq_sync = 0;
                napi_enable(&tp->napi);
                dev_close(tp->dev);
                tg3_full_lock(tp, 0);
        }
        return err;
}
3934
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll hook (netconsole etc.): drive the interrupt handler directly
 * when normal interrupt delivery is unavailable.
 */
static void tg3_poll_controller(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);

        tg3_interrupt(tp->pdev->irq, dev);
}
#endif
3943
/* Workqueue handler that performs a full chip reset outside interrupt
 * context; queued via schedule_work() from tg3_tx_timeout() and other
 * error paths.
 */
static void tg3_reset_task(struct work_struct *work)
{
        struct tg3 *tp = container_of(work, struct tg3, reset_task);
        unsigned int restart_timer;

        tg3_full_lock(tp, 0);

        if (!netif_running(tp->dev)) {
                tg3_full_unlock(tp);
                return;
        }

        /* tg3_netif_stop() is called outside the full lock. */
        tg3_full_unlock(tp);

        tg3_netif_stop(tp);

        tg3_full_lock(tp, 1);

        /* Latch and clear the restart-timer request under the lock. */
        restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
        tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

        if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
                /* Switch mailbox writes back to the flushing variants
                 * while recovering from a tx error.  NOTE(review): the
                 * code that sets TX_RECOVERY_PENDING lives outside this
                 * chunk -- confirm the rationale there.
                 */
                tp->write32_tx_mbox = tg3_write32_tx_mbox;
                tp->write32_rx_mbox = tg3_write_flush_reg32;
                tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
                tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
        }

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
        if (tg3_init_hw(tp, 1))
                goto out;

        tg3_netif_start(tp);

        if (restart_timer)
                mod_timer(&tp->timer, jiffies + 1);

out:
        tg3_full_unlock(tp);
}
3984
3985 static void tg3_dump_short_state(struct tg3 *tp)
3986 {
3987         printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
3988                tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
3989         printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
3990                tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
3991 }
3992
3993 static void tg3_tx_timeout(struct net_device *dev)
3994 {
3995         struct tg3 *tp = netdev_priv(dev);
3996
3997         if (netif_msg_tx_err(tp)) {
3998                 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3999                        dev->name);
4000                 tg3_dump_short_state(tp);
4001         }
4002
4003         schedule_work(&tp->reset_task);
4004 }
4005
4006 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
4007 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
4008 {
4009         u32 base = (u32) mapping & 0xffffffff;
4010
4011         return ((base > 0xffffdcc0) &&
4012                 (base + len + 8 < base));
4013 }
4014
4015 /* Test for DMA addresses > 40-bit */
4016 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
4017                                           int len)
4018 {
4019 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
4020         if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
4021                 return (((u64) mapping + len) > DMA_40BIT_MASK);
4022         return 0;
4023 #else
4024         return 0;
4025 #endif
4026 }
4027
/* Forward declaration; definition follows tigon3_dma_hwbug_workaround(). */
static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
4029
/* Workaround 4GB and 40-bit hardware DMA bugs.
 *
 * Linearize @skb into a freshly allocated copy, map it, and emit a
 * single descriptor at *@start; then unmap the original fragment
 * mappings occupying ring slots [*start, @last_plus_one) and fix up
 * the software ring bookkeeping.  Returns 0 on success, -1 when the
 * copy cannot be allocated or its mapping still crosses a 4GB
 * boundary (the packet is then dropped).
 */
static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
                                       u32 last_plus_one, u32 *start,
                                       u32 base_flags, u32 mss)
{
        struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
        dma_addr_t new_addr = 0;
        u32 entry = *start;
        int i, ret = 0;

        if (!new_skb) {
                ret = -1;
        } else {
                /* New SKB is guaranteed to be linear. */
                entry = *start;
                new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
                                          PCI_DMA_TODEVICE);
                /* Make sure new skb does not cross any 4G boundaries.
                 * Drop the packet if it does.
                 */
                if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
                        ret = -1;
                        dev_kfree_skb(new_skb);
                        new_skb = NULL;
                } else {
                        tg3_set_txd(tp, entry, new_addr, new_skb->len,
                                    base_flags, 1 | (mss << 1));
                        *start = NEXT_TX(entry);
                }
        }

        /* Now clean up the sw ring entries. */
        i = 0;
        while (entry != last_plus_one) {
                int len;

                /* Slot 0 held the linear head; subsequent slots held
                 * page fragments of the original skb.
                 */
                if (i == 0)
                        len = skb_headlen(skb);
                else
                        len = skb_shinfo(skb)->frags[i-1].size;
                pci_unmap_single(tp->pdev,
                                 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
                                 len, PCI_DMA_TODEVICE);
                if (i == 0) {
                        /* Re-point the head slot at the linear copy
                         * (NULL when the copy was dropped above).
                         */
                        tp->tx_buffers[entry].skb = new_skb;
                        pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
                } else {
                        tp->tx_buffers[entry].skb = NULL;
                }
                entry = NEXT_TX(entry);
                i++;
        }

        dev_kfree_skb(skb);

        return ret;
}
4087
4088 static void tg3_set_txd(struct tg3 *tp, int entry,
4089                         dma_addr_t mapping, int len, u32 flags,
4090                         u32 mss_and_is_end)
4091 {
4092         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4093         int is_end = (mss_and_is_end & 0x1);
4094         u32 mss = (mss_and_is_end >> 1);
4095         u32 vlan_tag = 0;
4096
4097         if (is_end)
4098                 flags |= TXD_FLAG_END;
4099         if (flags & TXD_FLAG_VLAN) {
4100                 vlan_tag = flags >> 16;
4101                 flags &= 0xffff;
4102         }
4103         vlan_tag |= (mss << TXD_MSS_SHIFT);
4104
4105         txd->addr_hi = ((u64) mapping >> 32);
4106         txd->addr_lo = ((u64) mapping & 0xffffffff);
4107         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4108         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4109 }
4110
/* hard_start_xmit for devices that don't have any bugs and
 * support TG3_FLG2_HW_TSO_2 only.
 *
 * Returns NETDEV_TX_OK or NETDEV_TX_BUSY (ring full).
 */
static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        dma_addr_t mapping;
        u32 len, entry, base_flags, mss;

        len = skb_headlen(skb);

        /* We are running in BH disabled context with netif_tx_lock
         * and TX reclaim runs via tp->napi.poll inside of a software
         * interrupt.  Furthermore, IRQ processing runs lockless so we have
         * no IRQ context deadlocks to worry about either.  Rejoice!
         */
        if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
                if (!netif_queue_stopped(dev)) {
                        netif_stop_queue(dev);

                        /* This is a hard error, log it. */
                        printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
                               "queue awake!\n", dev->name);
                }
                return NETDEV_TX_BUSY;
        }

        entry = tp->tx_prod;
        base_flags = 0;
        mss = 0;
        if ((mss = skb_shinfo(skb)->gso_size) != 0) {
                int tcp_opt_len, ip_tcp_len;

                /* Headers must be writable before we modify them below. */
                if (skb_header_cloned(skb) &&
                    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
                        dev_kfree_skb(skb);
                        goto out_unlock;
                }

                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
                        mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
                else {
                        struct iphdr *iph = ip_hdr(skb);

                        tcp_opt_len = tcp_optlen(skb);
                        ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

                        /* Clear/preset fields the hardware recomputes,
                         * and encode the header length into mss bits 9+.
                         */
                        iph->check = 0;
                        iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
                        mss |= (ip_tcp_len + tcp_opt_len) << 9;
                }

                base_flags |= (TXD_FLAG_CPU_PRE_DMA |
                               TXD_FLAG_CPU_POST_DMA);

                tcp_hdr(skb)->check = 0;

        }
        else if (skb->ip_summed == CHECKSUM_PARTIAL)
                base_flags |= TXD_FLAG_TCPUDP_CSUM;
#if TG3_VLAN_TAG_USED
        /* VLAN tag rides in the upper 16 bits of base_flags; see
         * tg3_set_txd().
         */
        if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
                base_flags |= (TXD_FLAG_VLAN |
                               (vlan_tx_tag_get(skb) << 16));
#endif

        /* Queue skb data, a.k.a. the main skb fragment. */
        mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

        tp->tx_buffers[entry].skb = skb;
        pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

        tg3_set_txd(tp, entry, mapping, len, base_flags,
                    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

        entry = NEXT_TX(entry);

        /* Now loop through additional data fragments, and queue them. */
        if (skb_shinfo(skb)->nr_frags > 0) {
                unsigned int i, last;

                last = skb_shinfo(skb)->nr_frags - 1;
                for (i = 0; i <= last; i++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                        len = frag->size;
                        mapping = pci_map_page(tp->pdev,
                                               frag->page,
                                               frag->page_offset,
                                               len, PCI_DMA_TODEVICE);

                        tp->tx_buffers[entry].skb = NULL;
                        pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

                        tg3_set_txd(tp, entry, mapping, len,
                                    base_flags, (i == last) | (mss << 1));

                        entry = NEXT_TX(entry);
                }
        }

        /* Packets are ready, update Tx producer idx local and on card. */
        tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

        tp->tx_prod = entry;
        if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
                netif_stop_queue(dev);
                /* Re-check after stopping: tx reclaim may have freed
                 * enough space in the meantime.
                 */
                if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
                        netif_wake_queue(tp->dev);
        }

out_unlock:
        mmiowb();

        dev->trans_start = jiffies;

        return NETDEV_TX_OK;
}
4229
/* Forward declaration; tg3_tso_bug() below feeds segments back into it. */
static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);

/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
        struct sk_buff *segs, *nskb;

        /* Estimate the number of fragments in the worst case */
        if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
                netif_stop_queue(tp->dev);
                if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
                        return NETDEV_TX_BUSY;

                netif_wake_queue(tp->dev);
        }

        /* Segment in software with TSO masked off, then transmit each
         * resulting skb individually through the regular xmit path.
         */
        segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
        if (unlikely(IS_ERR(segs)))
                goto tg3_tso_bug_end;

        do {
                nskb = segs;
                segs = segs->next;
                nskb->next = NULL;
                tg3_start_xmit_dma_bug(nskb, tp->dev);
        } while (segs);

tg3_tso_bug_end:
        /* The original skb is consumed in all cases. */
        dev_kfree_skb(skb);

        return NETDEV_TX_OK;
}
4264
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
 *
 * Same contract as tg3_start_xmit(), plus per-mapping checks for the
 * 4GB-crossing and >40-bit DMA hardware bugs; affected packets are
 * bounced through tigon3_dma_hwbug_workaround().
 */
static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        dma_addr_t mapping;
        u32 len, entry, base_flags, mss;
        int would_hit_hwbug;

        len = skb_headlen(skb);

        /* We are running in BH disabled context with netif_tx_lock
         * and TX reclaim runs via tp->napi.poll inside of a software
         * interrupt.  Furthermore, IRQ processing runs lockless so we have
         * no IRQ context deadlocks to worry about either.  Rejoice!
         */
        if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
                if (!netif_queue_stopped(dev)) {
                        netif_stop_queue(dev);

                        /* This is a hard error, log it. */
                        printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
                               "queue awake!\n", dev->name);
                }
                return NETDEV_TX_BUSY;
        }

        entry = tp->tx_prod;
        base_flags = 0;
        if (skb->ip_summed == CHECKSUM_PARTIAL)
                base_flags |= TXD_FLAG_TCPUDP_CSUM;
        mss = 0;
        if ((mss = skb_shinfo(skb)->gso_size) != 0) {
                struct iphdr *iph;
                int tcp_opt_len, ip_tcp_len, hdr_len;

                /* Headers must be writable before we modify them below. */
                if (skb_header_cloned(skb) &&
                    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
                        dev_kfree_skb(skb);
                        goto out_unlock;
                }

                tcp_opt_len = tcp_optlen(skb);
                ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

                /* Long TSO headers on TSO-bug chips go through the GSO
                 * software fallback instead.
                 */
                hdr_len = ip_tcp_len + tcp_opt_len;
                if (unlikely((ETH_HLEN + hdr_len) > 80) &&
                             (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
                        return (tg3_tso_bug(tp, skb));

                base_flags |= (TXD_FLAG_CPU_PRE_DMA |
                               TXD_FLAG_CPU_POST_DMA);

                iph = ip_hdr(skb);
                iph->check = 0;
                iph->tot_len = htons(mss + hdr_len);
                if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
                        tcp_hdr(skb)->check = 0;
                        base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
                } else
                        /* Firmware TSO: pre-seed the TCP pseudo-header
                         * checksum for the firmware to complete.
                         */
                        tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
                                                                 iph->daddr, 0,
                                                                 IPPROTO_TCP,
                                                                 0);

                /* Encode IP/TCP option word counts; the target field
                 * differs between HW TSO (and 5705) and firmware TSO.
                 */
                if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
                    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
                        if (tcp_opt_len || iph->ihl > 5) {
                                int tsflags;

                                tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
                                mss |= (tsflags << 11);
                        }
                } else {
                        if (tcp_opt_len || iph->ihl > 5) {
                                int tsflags;

                                tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
                                base_flags |= tsflags << 12;
                        }
                }
        }
#if TG3_VLAN_TAG_USED
        if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
                base_flags |= (TXD_FLAG_VLAN |
                               (vlan_tx_tag_get(skb) << 16));
#endif

        /* Queue skb data, a.k.a. the main skb fragment. */
        mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

        tp->tx_buffers[entry].skb = skb;
        pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

        would_hit_hwbug = 0;

        if (tg3_4g_overflow_test(mapping, len))
                would_hit_hwbug = 1;

        tg3_set_txd(tp, entry, mapping, len, base_flags,
                    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

        entry = NEXT_TX(entry);

        /* Now loop through additional data fragments, and queue them. */
        if (skb_shinfo(skb)->nr_frags > 0) {
                unsigned int i, last;

                last = skb_shinfo(skb)->nr_frags - 1;
                for (i = 0; i <= last; i++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                        len = frag->size;
                        mapping = pci_map_page(tp->pdev,
                                               frag->page,
                                               frag->page_offset,
                                               len, PCI_DMA_TODEVICE);

                        tp->tx_buffers[entry].skb = NULL;
                        pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

                        if (tg3_4g_overflow_test(mapping, len))
                                would_hit_hwbug = 1;

                        if (tg3_40bit_overflow_test(tp, mapping, len))
                                would_hit_hwbug = 1;

                        if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
                                tg3_set_txd(tp, entry, mapping, len,
                                            base_flags, (i == last)|(mss << 1));
                        else
                                tg3_set_txd(tp, entry, mapping, len,
                                            base_flags, (i == last));

                        entry = NEXT_TX(entry);
                }
        }

        if (would_hit_hwbug) {
                u32 last_plus_one = entry;
                u32 start;

                /* Rewind to the packet's head descriptor; the bounce
                 * workaround replaces the whole descriptor chain.
                 */
                start = entry - 1 - skb_shinfo(skb)->nr_frags;
                start &= (TG3_TX_RING_SIZE - 1);

                /* If the workaround fails due to memory/mapping
                 * failure, silently drop this packet.
                 */
                if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
                                                &start, base_flags, mss))
                        goto out_unlock;

                entry = start;
        }

        /* Packets are ready, update Tx producer idx local and on card. */
        tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

        tp->tx_prod = entry;
        if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
                netif_stop_queue(dev);
                if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
                        netif_wake_queue(tp->dev);
        }

out_unlock:
        mmiowb();

        dev->trans_start = jiffies;

        return NETDEV_TX_OK;
}
4438
4439 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4440                                int new_mtu)
4441 {
4442         dev->mtu = new_mtu;
4443
4444         if (new_mtu > ETH_DATA_LEN) {
4445                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4446                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4447                         ethtool_op_set_tso(dev, 0);
4448                 }
4449                 else
4450                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4451         } else {
4452                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4453                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4454                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
4455         }
4456 }
4457
/* net_device change_mtu hook: validate the requested MTU and, if the
 * interface is up, halt and restart the chip with the new buffer
 * configuration.  Returns 0 or a negative errno.
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
        struct tg3 *tp = netdev_priv(dev);
        int err;

        if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
                return -EINVAL;

        if (!netif_running(dev)) {
                /* We'll just catch it later when the
                 * device is up'd.
                 */
                tg3_set_mtu(dev, tp, new_mtu);
                return 0;
        }

        tg3_netif_stop(tp);

        tg3_full_lock(tp, 1);

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

        tg3_set_mtu(dev, tp, new_mtu);

        err = tg3_restart_hw(tp, 0);

        /* On restart failure tg3_restart_hw() has already closed the
         * device, so only restart the datapath on success.
         */
        if (!err)
                tg3_netif_start(tp);

        tg3_full_unlock(tp);

        return err;
}
4491
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
        struct ring_info *rxp;
        int i;

        /* Standard rx ring: unmap and free every posted skb. */
        for (i = 0; i < TG3_RX_RING_SIZE; i++) {
                rxp = &tp->rx_std_buffers[i];

                if (rxp->skb == NULL)
                        continue;
                pci_unmap_single(tp->pdev,
                                 pci_unmap_addr(rxp, mapping),
                                 tp->rx_pkt_buf_sz - tp->rx_offset,
                                 PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(rxp->skb);
                rxp->skb = NULL;
        }

        /* Jumbo rx ring: same, with the jumbo buffer size. */
        for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
                rxp = &tp->rx_jumbo_buffers[i];

                if (rxp->skb == NULL)
                        continue;
                pci_unmap_single(tp->pdev,
                                 pci_unmap_addr(rxp, mapping),
                                 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
                                 PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(rxp->skb);
                rxp->skb = NULL;
        }

        /* Tx ring: each packet occupies one slot for its linear head
         * plus one slot per page fragment; only the head slot holds the
         * skb pointer.
         */
        for (i = 0; i < TG3_TX_RING_SIZE; ) {
                struct tx_ring_info *txp;
                struct sk_buff *skb;
                int j;

                txp = &tp->tx_buffers[i];
                skb = txp->skb;

                if (skb == NULL) {
                        i++;
                        continue;
                }

                pci_unmap_single(tp->pdev,
                                 pci_unmap_addr(txp, mapping),
                                 skb_headlen(skb),
                                 PCI_DMA_TODEVICE);
                txp->skb = NULL;

                i++;

                /* Unmap the fragment slots that follow the head slot,
                 * wrapping around the ring if necessary.
                 */
                for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
                        txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
                        pci_unmap_page(tp->pdev,
                                       pci_unmap_addr(txp, mapping),
                                       skb_shinfo(skb)->frags[j].size,
                                       PCI_DMA_TODEVICE);
                        i++;
                }

                dev_kfree_skb_any(skb);
        }
}
4563
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0 on success, -ENOMEM if no rx buffers could be allocated.
 */
static int tg3_init_rings(struct tg3 *tp)
{
        u32 i;

        /* Free up all the SKBs. */
        tg3_free_rings(tp);

        /* Zero out all descriptors. */
        memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
        memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
        memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
        memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);

        /* 5780-class chips handle large MTUs with jumbo-sized buffers
         * in the standard ring rather than a separate jumbo ring.
         */
        tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
        if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
            (tp->dev->mtu > ETH_DATA_LEN))
                tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;

        /* Initialize invariants of the rings, we only set this
         * stuff once.  This works because the card does not
         * write into the rx buffer posting rings.
         */
        for (i = 0; i < TG3_RX_RING_SIZE; i++) {
                struct tg3_rx_buffer_desc *rxd;

                rxd = &tp->rx_std[i];
                rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
                        << RXD_LEN_SHIFT;
                rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
                rxd->opaque = (RXD_OPAQUE_RING_STD |
                               (i << RXD_OPAQUE_INDEX_SHIFT));
        }

        if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
                for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
                        struct tg3_rx_buffer_desc *rxd;

                        rxd = &tp->rx_jumbo[i];
                        rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
                                << RXD_LEN_SHIFT;
                        rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
                                RXD_FLAG_JUMBO;
                        rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
                               (i << RXD_OPAQUE_INDEX_SHIFT));
                }
        }

        /* Now allocate fresh SKBs for each rx ring. */
        for (i = 0; i < tp->rx_pending; i++) {
                if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
                        /* Partial allocation is tolerated: shrink the
                         * ring unless nothing at all was allocated.
                         */
                        printk(KERN_WARNING PFX
                               "%s: Using a smaller RX standard ring, "
                               "only %d out of %d buffers were allocated "
                               "successfully.\n",
                               tp->dev->name, i, tp->rx_pending);
                        if (i == 0)
                                return -ENOMEM;
                        tp->rx_pending = i;
                        break;
                }
        }

        if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
                for (i = 0; i < tp->rx_jumbo_pending; i++) {
                        if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
                                             -1, i) < 0) {
                                printk(KERN_WARNING PFX
                                       "%s: Using a smaller RX jumbo ring, "
                                       "only %d out of %d buffers were "
                                       "allocated successfully.\n",
                                       tp->dev->name, i, tp->rx_jumbo_pending);
                                if (i == 0) {
                                        tg3_free_rings(tp);
                                        return -ENOMEM;
                                }
                                tp->rx_jumbo_pending = i;
                                break;
                        }
                }
        }
        return 0;
}
4653
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 *
 * Releases the DMA-coherent rings, the status block and the
 * statistics block, plus the kmalloc'ed buffer-tracking array.
 * Each pointer is NULLed after freeing, so this is safe to call
 * again (e.g. from the tg3_alloc_consistent() error path).
 */
static void tg3_free_consistent(struct tg3 *tp)
{
        /* rx_jumbo_buffers and tx_buffers point into this single
         * allocation (see tg3_alloc_consistent), so only the base
         * pointer is freed.
         */
        kfree(tp->rx_std_buffers);
        tp->rx_std_buffers = NULL;
        if (tp->rx_std) {
                pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
                                    tp->rx_std, tp->rx_std_mapping);
                tp->rx_std = NULL;
        }
        if (tp->rx_jumbo) {
                pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
                                    tp->rx_jumbo, tp->rx_jumbo_mapping);
                tp->rx_jumbo = NULL;
        }
        if (tp->rx_rcb) {
                pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
                                    tp->rx_rcb, tp->rx_rcb_mapping);
                tp->rx_rcb = NULL;
        }
        if (tp->tx_ring) {
                pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
                        tp->tx_ring, tp->tx_desc_mapping);
                tp->tx_ring = NULL;
        }
        if (tp->hw_status) {
                pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
                                    tp->hw_status, tp->status_mapping);
                tp->hw_status = NULL;
        }
        if (tp->hw_stats) {
                pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
                                    tp->hw_stats, tp->stats_mapping);
                tp->hw_stats = NULL;
        }
}
4693
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 *
 * Allocates one zeroed array holding the std RX, jumbo RX and TX
 * buffer-tracking entries back to back, then the DMA-coherent rings,
 * the status block and the statistics block.  On any failure,
 * everything allocated so far is released via tg3_free_consistent()
 * and -ENOMEM is returned; returns 0 on success.
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
        /* Single allocation for all three host-side tracking arrays;
         * rx_jumbo_buffers and tx_buffers below are carved out of it.
         */
        tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
                                      (TG3_RX_RING_SIZE +
                                       TG3_RX_JUMBO_RING_SIZE)) +
                                     (sizeof(struct tx_ring_info) *
                                      TG3_TX_RING_SIZE),
                                     GFP_KERNEL);
        if (!tp->rx_std_buffers)
                return -ENOMEM;

        tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
        tp->tx_buffers = (struct tx_ring_info *)
                &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];

        tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
                                          &tp->rx_std_mapping);
        if (!tp->rx_std)
                goto err_out;

        tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
                                            &tp->rx_jumbo_mapping);

        if (!tp->rx_jumbo)
                goto err_out;

        tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
                                          &tp->rx_rcb_mapping);
        if (!tp->rx_rcb)
                goto err_out;

        tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
                                           &tp->tx_desc_mapping);
        if (!tp->tx_ring)
                goto err_out;

        tp->hw_status = pci_alloc_consistent(tp->pdev,
                                             TG3_HW_STATUS_SIZE,
                                             &tp->status_mapping);
        if (!tp->hw_status)
                goto err_out;

        tp->hw_stats = pci_alloc_consistent(tp->pdev,
                                            sizeof(struct tg3_hw_stats),
                                            &tp->stats_mapping);
        if (!tp->hw_stats)
                goto err_out;

        /* pci_alloc_consistent() contents are not guaranteed zeroed;
         * clear the blocks the hardware and driver both read.
         */
        memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
        memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

        return 0;

err_out:
        tg3_free_consistent(tp);
        return -ENOMEM;
}
4755
4756 #define MAX_WAIT_CNT 1000
4757
4758 /* To stop a block, clear the enable bit and poll till it
4759  * clears.  tp->lock is held.
4760  */
4761 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4762 {
4763         unsigned int i;
4764         u32 val;
4765
4766         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4767                 switch (ofs) {
4768                 case RCVLSC_MODE:
4769                 case DMAC_MODE:
4770                 case MBFREE_MODE:
4771                 case BUFMGR_MODE:
4772                 case MEMARB_MODE:
4773                         /* We can't enable/disable these bits of the
4774                          * 5705/5750, just say success.
4775                          */
4776                         return 0;
4777
4778                 default:
4779                         break;
4780                 };
4781         }
4782
4783         val = tr32(ofs);
4784         val &= ~enable_bit;
4785         tw32_f(ofs, val);
4786
4787         for (i = 0; i < MAX_WAIT_CNT; i++) {
4788                 udelay(100);
4789                 val = tr32(ofs);
4790                 if ((val & enable_bit) == 0)
4791                         break;
4792         }
4793
4794         if (i == MAX_WAIT_CNT && !silent) {
4795                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4796                        "ofs=%lx enable_bit=%x\n",
4797                        ofs, enable_bit);
4798                 return -ENODEV;
4799         }
4800
4801         return 0;
4802 }
4803
/* Quiesce all MAC and DMA activity ahead of a chip reset.
 * Blocks are stopped receive side first, then send/DMA side, then
 * host coalescing, buffer manager and memory arbiter.  Individual
 * errors are OR-ed into the return value, so a non-zero result means
 * at least one block failed to stop (the exact errno may be mangled
 * by the OR-ing).  tp->lock is held.
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
        int i, err;

        tg3_disable_ints(tp);

        /* Stop accepting new frames at the MAC. */
        tp->rx_mode &= ~RX_MODE_ENABLE;
        tw32_f(MAC_RX_MODE, tp->rx_mode);
        udelay(10);

        /* Receive-side blocks. */
        err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

        /* Send-side and DMA blocks. */
        err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

        tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        /* Disable the MAC transmitter and wait for it to drain. */
        tp->tx_mode &= ~TX_MODE_ENABLE;
        tw32_f(MAC_TX_MODE, tp->tx_mode);

        for (i = 0; i < MAX_WAIT_CNT; i++) {
                udelay(100);
                if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
                        break;
        }
        if (i >= MAX_WAIT_CNT) {
                printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
                       "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
                       tp->dev->name, tr32(MAC_TX_MODE));
                err |= -ENODEV;
        }

        err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

        /* Pulse the FTQ reset register. */
        tw32(FTQ_RESET, 0xffffffff);
        tw32(FTQ_RESET, 0x00000000);

        err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

        /* Wipe the host status and statistics blocks. */
        if (tp->hw_status)
                memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
        if (tp->hw_stats)
                memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

        return err;
}
4866
4867 /* tp->lock is held. */
4868 static int tg3_nvram_lock(struct tg3 *tp)
4869 {
4870         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4871                 int i;
4872
4873                 if (tp->nvram_lock_cnt == 0) {
4874                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4875                         for (i = 0; i < 8000; i++) {
4876                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4877                                         break;
4878                                 udelay(20);
4879                         }
4880                         if (i == 8000) {
4881                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4882                                 return -ENODEV;
4883                         }
4884                 }
4885                 tp->nvram_lock_cnt++;
4886         }
4887         return 0;
4888 }
4889
4890 /* tp->lock is held. */
4891 static void tg3_nvram_unlock(struct tg3 *tp)
4892 {
4893         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4894                 if (tp->nvram_lock_cnt > 0)
4895                         tp->nvram_lock_cnt--;
4896                 if (tp->nvram_lock_cnt == 0)
4897                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4898         }
4899 }
4900
4901 /* tp->lock is held. */
4902 static void tg3_enable_nvram_access(struct tg3 *tp)
4903 {
4904         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4905             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4906                 u32 nvaccess = tr32(NVRAM_ACCESS);
4907
4908                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4909         }
4910 }
4911
4912 /* tp->lock is held. */
4913 static void tg3_disable_nvram_access(struct tg3 *tp)
4914 {
4915         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4916             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4917                 u32 nvaccess = tr32(NVRAM_ACCESS);
4918
4919                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4920         }
4921 }
4922
/* Post an event to the APE management firmware.
 *
 * Bails out silently unless the APE segment signature and firmware
 * status indicate a live APE.  The event-status register is written
 * under the APE memory lock; if a previous event is still pending
 * the loop retries for up to ~1ms.  The doorbell (APE_EVENT_1) is
 * only rung once our event has actually been queued.
 */
static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int i;
        u32 apedata;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (apedata != APE_FW_STATUS_READY)
                return;

        /* Wait for up to 1 millisecond for APE to service previous event. */
        for (i = 0; i < 10; i++) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                /* Previous event drained: queue ours (with the pending
                 * bit set) while we still hold the memory lock.
                 */
                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                                        event | APE_EVENT_STATUS_EVENT_PENDING);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(100);
        }

        /* apedata still holds the last EVENT_STATUS snapshot; only
         * ring the doorbell if our event made it into the register.
         */
        if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}
4958
4959 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
4960 {
4961         u32 event;
4962         u32 apedata;
4963
4964         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
4965                 return;
4966
4967         switch (kind) {
4968                 case RESET_KIND_INIT:
4969                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
4970                                         APE_HOST_SEG_SIG_MAGIC);
4971                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
4972                                         APE_HOST_SEG_LEN_MAGIC);
4973                         apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
4974                         tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
4975                         tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
4976                                         APE_HOST_DRIVER_ID_MAGIC);
4977                         tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
4978                                         APE_HOST_BEHAV_NO_PHYLOCK);
4979
4980                         event = APE_EVENT_STATUS_STATE_START;
4981                         break;
4982                 case RESET_KIND_SHUTDOWN:
4983                         event = APE_EVENT_STATUS_STATE_UNLOAD;
4984                         break;
4985                 case RESET_KIND_SUSPEND:
4986                         event = APE_EVENT_STATUS_STATE_SUSPEND;
4987                         break;
4988                 default:
4989                         return;
4990         }
4991
4992         event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
4993
4994         tg3_ape_send_event(tp, event);
4995 }
4996
4997 /* tp->lock is held. */
4998 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4999 {
5000         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
5001                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
5002
5003         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5004                 switch (kind) {
5005                 case RESET_KIND_INIT:
5006                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5007                                       DRV_STATE_START);
5008                         break;
5009
5010                 case RESET_KIND_SHUTDOWN:
5011                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5012                                       DRV_STATE_UNLOAD);
5013                         break;
5014
5015                 case RESET_KIND_SUSPEND:
5016                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5017                                       DRV_STATE_SUSPEND);
5018                         break;
5019
5020                 default:
5021                         break;
5022                 };
5023         }
5024
5025         if (kind == RESET_KIND_INIT ||
5026             kind == RESET_KIND_SUSPEND)
5027                 tg3_ape_driver_state_change(tp, kind);
5028 }
5029
5030 /* tp->lock is held. */
5031 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
5032 {
5033         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5034                 switch (kind) {
5035                 case RESET_KIND_INIT:
5036                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5037                                       DRV_STATE_START_DONE);
5038                         break;
5039
5040                 case RESET_KIND_SHUTDOWN:
5041                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5042                                       DRV_STATE_UNLOAD_DONE);
5043                         break;
5044
5045                 default:
5046                         break;
5047                 };
5048         }
5049
5050         if (kind == RESET_KIND_SHUTDOWN)
5051                 tg3_ape_driver_state_change(tp, kind);
5052 }
5053
5054 /* tp->lock is held. */
5055 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
5056 {
5057         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5058                 switch (kind) {
5059                 case RESET_KIND_INIT:
5060                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5061                                       DRV_STATE_START);
5062                         break;
5063
5064                 case RESET_KIND_SHUTDOWN:
5065                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5066                                       DRV_STATE_UNLOAD);
5067                         break;
5068
5069                 case RESET_KIND_SUSPEND:
5070                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5071                                       DRV_STATE_SUSPEND);
5072                         break;
5073
5074                 default:
5075                         break;
5076                 };
5077         }
5078 }
5079
/* Wait for the on-chip firmware to finish initializing after reset.
 * Returns 0 on success or when no firmware is present; -ENODEV only
 * when the 5906 VCPU never signals init-done.
 */
static int tg3_poll_fw(struct tg3 *tp)
{
        int i;
        u32 val;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                /* Wait up to 20ms for init done. */
                for (i = 0; i < 200; i++) {
                        if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
                                return 0;
                        udelay(100);
                }
                return -ENODEV;
        }

        /* Wait for firmware initialization to complete.  The firmware
         * signals completion by writing the one's complement of the
         * magic value into the mailbox; poll up to 100000 * 10us.
         */
        for (i = 0; i < 100000; i++) {
                tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
                if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
                        break;
                udelay(10);
        }

        /* Chip might not be fitted with firmware.  Some Sun onboard
         * parts are configured like that.  So don't signal the timeout
         * of the above loop as an error, but do report the lack of
         * running firmware once.
         */
        if (i >= 100000 &&
            !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
                tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;

                printk(KERN_INFO PFX "%s: No firmware running.\n",
                       tp->dev->name);
        }

        return 0;
}
5118
/* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
        /* The core-clock reset clears the memory-enable bit in the
         * PCI command register (see tg3_chip_reset), so stash the
         * command word for tg3_restore_pci_state().
         */
        pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
5124
/* Restore PCI state after chip reset.
 * Re-enables indirect register access, rewrites PCISTATE and the
 * saved PCI command word, restores cacheline/latency (or the PCIe
 * read request size), clears PCI-X relaxed ordering and, on
 * 5780-class parts, re-enables MSI if it was in use.
 */
static void tg3_restore_pci_state(struct tg3 *tp)
{
        u32 val;

        /* Re-enable indirect register accesses. */
        pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
                               tp->misc_host_ctrl);

        /* Set MAX PCI retry to zero. */
        val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
        if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
            (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
                val |= PCISTATE_RETRY_SAME_DMA;
        /* Allow reads and writes to the APE register and memory space. */
        if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
                val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
                       PCISTATE_ALLOW_APE_SHMEM_WR;
        pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

        /* Put back the command word saved in tg3_save_pci_state(). */
        pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

        if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
                pcie_set_readrq(tp->pdev, 4096);
        else {
                pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
                                      tp->pci_cacheline_sz);
                pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
                                      tp->pci_lat_timer);
        }

        /* Make sure PCI-X relaxed ordering bit is clear. */
        if (tp->pcix_cap) {
                u16 pcix_cmd;

                pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
                                     &pcix_cmd);
                pcix_cmd &= ~PCI_X_CMD_ERO;
                pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
                                      pcix_cmd);
        }

        if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {

                /* Chip reset on 5780 will reset MSI enable bit,
                 * so need to restore it.
                 */
                if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                        u16 ctrl;

                        pci_read_config_word(tp->pdev,
                                             tp->msi_cap + PCI_MSI_FLAGS,
                                             &ctrl);
                        pci_write_config_word(tp->pdev,
                                              tp->msi_cap + PCI_MSI_FLAGS,
                                              ctrl | PCI_MSI_FLAGS_ENABLE);
                        val = tr32(MSGINT_MODE);
                        tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
                }
        }
}
5186
5187 static void tg3_stop_fw(struct tg3 *);
5188
/* Perform a GRC core-clock reset of the chip and bring it back to a
 * state where register access works again: saves/restores PCI state
 * around the reset, re-enables the memory arbiter, waits for the
 * firmware, and re-probes the ASF enable state from NVRAM config.
 * tp->lock is held.
 */
static int tg3_chip_reset(struct tg3 *tp)
{
        u32 val;
        void (*write_op)(struct tg3 *, u32, u32);
        int err;

        tg3_nvram_lock(tp);

        /* No matching tg3_nvram_unlock() after this because
         * chip reset below will undo the nvram lock.
         */
        tp->nvram_lock_cnt = 0;

        /* GRC_MISC_CFG core clock reset will clear the memory
         * enable bit in PCI register 4 and the MSI enable bit
         * on some chips, so we save relevant registers here.
         */
        tg3_save_pci_state(tp);

        /* Clear the fastboot program counter on chips that have it. */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                tw32(GRC_FASTBOOT_PC, 0);

        /*
         * We must avoid the readl() that normally takes place.
         * It locks machines, causes machine checks, and other
         * fun things.  So, temporarily disable the 5701
         * hardware workaround, while we do the reset.
         */
        write_op = tp->write32;
        if (write_op == tg3_write_flush_reg32)
                tp->write32 = tg3_write32;

        /* Prevent the irq handler from reading or writing PCI registers
         * during chip reset when the memory enable bit in the PCI command
         * register may be cleared.  The chip does not generate interrupt
         * at this time, but the irq handler may still be called due to irq
         * sharing or irqpoll.
         */
        tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
        if (tp->hw_status) {
                tp->hw_status->status = 0;
                tp->hw_status->status_tag = 0;
        }
        tp->last_tag = 0;
        smp_mb();
        synchronize_irq(tp->pdev->irq);

        /* do the reset */
        val = GRC_MISC_CFG_CORECLK_RESET;

        if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
                if (tr32(0x7e2c) == 0x60) {
                        tw32(0x7e2c, 0x20);
                }
                if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
                        tw32(GRC_MISC_CFG, (1 << 29));
                        val |= (1 << 29);
                }
        }

        /* 5906: flag a driver reset to the VCPU and un-halt it. */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
                tw32(GRC_VCPU_EXT_CTRL,
                     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
        }

        if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
                val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
        tw32(GRC_MISC_CFG, val);

        /* restore 5701 hardware bug workaround write method */
        tp->write32 = write_op;

        /* Unfortunately, we have to delay before the PCI read back.
         * Some 575X chips even will not respond to a PCI cfg access
         * when the reset command is given to the chip.
         *
         * How do these hardware designers expect things to work
         * properly if the PCI write is posted for a long period
         * of time?  It is always necessary to have some method by
         * which a register read back can occur to push the write
         * out which does the reset.
         *
         * For most tg3 variants the trick below was working.
         * Ho hum...
         */
        udelay(120);

        /* Flush PCI posted writes.  The normal MMIO registers
         * are inaccessible at this time so this is the only
         * way to make this reliably (actually, this is no longer
         * the case, see above).  I tried to use indirect
         * register read/write but this upset some 5701 variants.
         */
        pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

        udelay(120);

        if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
                if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
                        int i;
                        u32 cfg_val;

                        /* Wait for link training to complete.  */
                        for (i = 0; i < 5000; i++)
                                udelay(100);

                        pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
                        pci_write_config_dword(tp->pdev, 0xc4,
                                               cfg_val | (1 << 15));
                }
                /* Set PCIE max payload size and clear error status.  */
                pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
        }

        tg3_restore_pci_state(tp);

        /* Registers are usable again; let the irq handler back in. */
        tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;

        /* Re-enable the memory arbiter; on 5780-class parts preserve
         * whatever other mode bits are currently set.
         */
        val = 0;
        if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
                val = tr32(MEMARB_MODE);
        tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

        if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
                tg3_stop_fw(tp);
                tw32(0x5000, 0x400);
        }

        tw32(GRC_MODE, tp->grc_mode);

        if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
                val = tr32(0xc4);

                tw32(0xc4, val | (1 << 15));
        }

        if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
                if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
                        tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
                tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
        }

        /* Restore the MAC port mode for SERDES configurations. */
        if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
                tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
                tw32_f(MAC_MODE, tp->mac_mode);
        } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
                tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
                tw32_f(MAC_MODE, tp->mac_mode);
        } else
                tw32_f(MAC_MODE, 0);
        udelay(40);

        err = tg3_poll_fw(tp);
        if (err)
                return err;

        if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
            tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
                val = tr32(0x7c00);

                tw32(0x7c00, val | (1 << 25));
        }

        /* Reprobe ASF enable state.  */
        tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
        tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
        tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
        if (val == NIC_SRAM_DATA_SIG_MAGIC) {
                u32 nic_cfg;

                tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
                if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
                        tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
                        if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
                                tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
                }
        }

        return 0;
}
5377
5378 /* tp->lock is held. */
5379 static void tg3_stop_fw(struct tg3 *tp)
5380 {
5381         if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
5382            !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
5383                 u32 val;
5384                 int i;
5385
5386                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
5387                 val = tr32(GRC_RX_CPU_EVENT);
5388                 val |= (1 << 14);
5389                 tw32(GRC_RX_CPU_EVENT, val);
5390
5391                 /* Wait for RX cpu to ACK the event.  */
5392                 for (i = 0; i < 100; i++) {
5393                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
5394                                 break;
5395                         udelay(1);
5396                 }
5397         }
5398 }
5399
/* Full halt/reset sequence: pause the firmware, write the pre-reset
 * signature for @kind, quiesce the hardware, reset the chip, then
 * write the legacy and post-reset signatures.  Returns the
 * tg3_chip_reset() result.  tp->lock is held.
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
        int err;

        tg3_stop_fw(tp);

        tg3_write_sig_pre_reset(tp, kind);

        /* Best-effort quiesce: tg3_abort_hw() errors are ignored
         * here since we reset the chip immediately afterwards.
         */
        tg3_abort_hw(tp, silent);
        err = tg3_chip_reset(tp);

        tg3_write_sig_legacy(tp, kind);
        tg3_write_sig_post_reset(tp, kind);

        if (err)
                return err;

        return 0;
}
5420
/* Layout of the 5701 A0 workaround firmware image (MIPS code loaded
 * into CPU scratch memory).  The *_ADDR values are in the firmware's
 * own address space; the loader uses only the low 16 bits as the
 * scratch-memory offset (see tg3_load_firmware_cpu).
 */
#define TG3_FW_RELEASE_MAJOR    0x0
#define TG3_FW_RELASE_MINOR     0x0     /* NOTE(review): "RELASE" typo; unused here, renaming could break other users */
#define TG3_FW_RELEASE_FIX      0x0
#define TG3_FW_START_ADDR       0x08000000
#define TG3_FW_TEXT_ADDR        0x08000000
#define TG3_FW_TEXT_LEN         0x9c0
#define TG3_FW_RODATA_ADDR      0x080009c0
#define TG3_FW_RODATA_LEN       0x60
#define TG3_FW_DATA_ADDR        0x08000a40
#define TG3_FW_DATA_LEN         0x20
#define TG3_FW_SBSS_ADDR        0x08000a60
#define TG3_FW_SBSS_LEN         0xc
#define TG3_FW_BSS_ADDR         0x08000a70
#define TG3_FW_BSS_LEN          0x10
5435
/* .text section of the 5701 A0 workaround firmware (opaque MIPS machine
 * code supplied by Broadcom — see the copyright/permission notice at
 * the top of this file).  Do not edit; downloaded verbatim into the
 * RX/TX CPU scratch areas by tg3_load_5701_a0_firmware_fix().
 */
static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
	0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
	0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
	0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
	0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
	0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
	0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
	0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
	0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
	0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
	0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
	0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
	0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
	0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
	0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
	0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
	0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
	0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
	0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
	0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
	0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
	0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0,
	0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
	0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
	0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
	0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
	0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
	0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
	0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
	0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
	0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
	0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
	0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
	0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
	0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
	0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
	0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
	0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
	0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
	0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
	0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
	0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
	0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
	0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
	0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
	0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
	0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
	0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
	0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
	0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
	0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
	0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
	0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
	0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
	0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
	0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
	0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
	0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
	0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
	0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
	0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
	0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
	0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
	0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
	0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
	0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
	0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
	0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
	0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
	0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
	0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
	0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
};
5529
/* .rodata section of the 5701 A0 workaround firmware.  Opaque firmware
 * data; the word values appear to encode short ASCII strings (message
 * tags used by the firmware) — TODO(review): confirm against Broadcom
 * image before relying on that reading.  Do not edit.
 */
static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
	0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
	0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
	0x00000000
};
5537
#if 0 /* All zeros, don't eat up space with it. */
/* .data section of the 5701 A0 firmware.  The loader zero-fills any
 * section whose data pointer is NULL (see tg3_load_firmware_cpu), so
 * this all-zero image can safely be compiled out.
 */
u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000
};
#endif
5544
/* On-chip scratch-memory windows into which CPU firmware is loaded. */
#define RX_CPU_SCRATCH_BASE     0x30000
#define RX_CPU_SCRATCH_SIZE     0x04000
#define TX_CPU_SCRATCH_BASE     0x34000
#define TX_CPU_SCRATCH_SIZE     0x04000
5549
5550 /* tp->lock is held. */
5551 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
5552 {
5553         int i;
5554
5555         BUG_ON(offset == TX_CPU_BASE &&
5556             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
5557
5558         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5559                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
5560
5561                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
5562                 return 0;
5563         }
5564         if (offset == RX_CPU_BASE) {
5565                 for (i = 0; i < 10000; i++) {
5566                         tw32(offset + CPU_STATE, 0xffffffff);
5567                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5568                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5569                                 break;
5570                 }
5571
5572                 tw32(offset + CPU_STATE, 0xffffffff);
5573                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
5574                 udelay(10);
5575         } else {
5576                 for (i = 0; i < 10000; i++) {
5577                         tw32(offset + CPU_STATE, 0xffffffff);
5578                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5579                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5580                                 break;
5581                 }
5582         }
5583
5584         if (i >= 10000) {
5585                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
5586                        "and %s CPU\n",
5587                        tp->dev->name,
5588                        (offset == RX_CPU_BASE ? "RX" : "TX"));
5589                 return -ENODEV;
5590         }
5591
5592         /* Clear firmware's nvram arbitration. */
5593         if (tp->tg3_flags & TG3_FLAG_NVRAM)
5594                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
5595         return 0;
5596 }
5597
/* Describes one firmware image to be downloaded into a CPU scratch
 * area.  Section base addresses are in the firmware's own address
 * space; only the low 16 bits are used as the scratch offset.  A NULL
 * *_data pointer means the section is zero-filled by the loader
 * (see tg3_load_firmware_cpu).
 */
struct fw_info {
	unsigned int text_base;		/* .text load address */
	unsigned int text_len;		/* .text length in bytes */
	const u32 *text_data;
	unsigned int rodata_base;	/* .rodata load address */
	unsigned int rodata_len;	/* .rodata length in bytes */
	const u32 *rodata_data;
	unsigned int data_base;		/* .data load address */
	unsigned int data_len;		/* .data length in bytes */
	const u32 *data_data;		/* NULL => zero-fill the section */
};
5609
5610 /* tp->lock is held. */
5611 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5612                                  int cpu_scratch_size, struct fw_info *info)
5613 {
5614         int err, lock_err, i;
5615         void (*write_op)(struct tg3 *, u32, u32);
5616
5617         if (cpu_base == TX_CPU_BASE &&
5618             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5619                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5620                        "TX cpu firmware on %s which is 5705.\n",
5621                        tp->dev->name);
5622                 return -EINVAL;
5623         }
5624
5625         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5626                 write_op = tg3_write_mem;
5627         else
5628                 write_op = tg3_write_indirect_reg32;
5629
5630         /* It is possible that bootcode is still loading at this point.
5631          * Get the nvram lock first before halting the cpu.
5632          */
5633         lock_err = tg3_nvram_lock(tp);
5634         err = tg3_halt_cpu(tp, cpu_base);
5635         if (!lock_err)
5636                 tg3_nvram_unlock(tp);
5637         if (err)
5638                 goto out;
5639
5640         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5641                 write_op(tp, cpu_scratch_base + i, 0);
5642         tw32(cpu_base + CPU_STATE, 0xffffffff);
5643         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5644         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5645                 write_op(tp, (cpu_scratch_base +
5646                               (info->text_base & 0xffff) +
5647                               (i * sizeof(u32))),
5648                          (info->text_data ?
5649                           info->text_data[i] : 0));
5650         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5651                 write_op(tp, (cpu_scratch_base +
5652                               (info->rodata_base & 0xffff) +
5653                               (i * sizeof(u32))),
5654                          (info->rodata_data ?
5655                           info->rodata_data[i] : 0));
5656         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5657                 write_op(tp, (cpu_scratch_base +
5658                               (info->data_base & 0xffff) +
5659                               (i * sizeof(u32))),
5660                          (info->data_data ?
5661                           info->data_data[i] : 0));
5662
5663         err = 0;
5664
5665 out:
5666         return err;
5667 }
5668
5669 /* tp->lock is held. */
5670 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5671 {
5672         struct fw_info info;
5673         int err, i;
5674
5675         info.text_base = TG3_FW_TEXT_ADDR;
5676         info.text_len = TG3_FW_TEXT_LEN;
5677         info.text_data = &tg3FwText[0];
5678         info.rodata_base = TG3_FW_RODATA_ADDR;
5679         info.rodata_len = TG3_FW_RODATA_LEN;
5680         info.rodata_data = &tg3FwRodata[0];
5681         info.data_base = TG3_FW_DATA_ADDR;
5682         info.data_len = TG3_FW_DATA_LEN;
5683         info.data_data = NULL;
5684
5685         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5686                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5687                                     &info);
5688         if (err)
5689                 return err;
5690
5691         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5692                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5693                                     &info);
5694         if (err)
5695                 return err;
5696
5697         /* Now startup only the RX cpu. */
5698         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5699         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5700
5701         for (i = 0; i < 5; i++) {
5702                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5703                         break;
5704                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5705                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
5706                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5707                 udelay(1000);
5708         }
5709         if (i >= 5) {
5710                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
5711                        "to set RX CPU PC, is %08x should be %08x\n",
5712                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
5713                        TG3_FW_TEXT_ADDR);
5714                 return -ENODEV;
5715         }
5716         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5717         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
5718
5719         return 0;
5720 }
5721
5722
/* Layout of the TSO offload firmware image (MIPS code, downloaded by
 * the TSO firmware loader).  Addresses are in the firmware's own
 * address space; only the low 16 bits are used as scratch offsets.
 */
#define TG3_TSO_FW_RELEASE_MAJOR        0x1
#define TG3_TSO_FW_RELASE_MINOR         0x6     /* NOTE(review): "RELASE" typo; unused here, renaming could break other users */
#define TG3_TSO_FW_RELEASE_FIX          0x0
#define TG3_TSO_FW_START_ADDR           0x08000000
#define TG3_TSO_FW_TEXT_ADDR            0x08000000
#define TG3_TSO_FW_TEXT_LEN             0x1aa0
#define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
#define TG3_TSO_FW_RODATA_LEN           0x60
#define TG3_TSO_FW_DATA_ADDR            0x08001b20
#define TG3_TSO_FW_DATA_LEN             0x30
#define TG3_TSO_FW_SBSS_ADDR            0x08001b50
#define TG3_TSO_FW_SBSS_LEN             0x2c
#define TG3_TSO_FW_BSS_ADDR             0x08001b80
#define TG3_TSO_FW_BSS_LEN              0x894
5737
5738 static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5739         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5740         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5741         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5742         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5743         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5744         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5745         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5746         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5747         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5748         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5749         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5750         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5751         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5752         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5753         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5754         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5755         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5756         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5757         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5758         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5759         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5760         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5761         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5762         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5763         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5764         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5765         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5766         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5767         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5768         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5769         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5770         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5771         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5772         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5773         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5774         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5775         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5776         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5777         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5778         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5779         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5780         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5781         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5782         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5783         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5784         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5785         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5786         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5787         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5788         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5789         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5790         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5791         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5792         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5793         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5794         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5795         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5796         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5797         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5798         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5799         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5800         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5801         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5802         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5803         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5804         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5805         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5806         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5807         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5808         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5809         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5810         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5811         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5812         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5813         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5814         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5815         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5816         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5817         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5818         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5819         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5820         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5821         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5822         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5823         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5824         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5825         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5826         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5827         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5828         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5829         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5830         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5831         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5832         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5833         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5834         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5835         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5836         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5837         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5838         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5839         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5840         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5841         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5842         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5843         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5844         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5845         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5846         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5847         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5848         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5849         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5850         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5851         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5852         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5853         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5854         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5855         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5856         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5857         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5858         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5859         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5860         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5861         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5862         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5863         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5864         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5865         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5866         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5867         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5868         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5869         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5870         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5871         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5872         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5873         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5874         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5875         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5876         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5877         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5878         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5879         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5880         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5881         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5882         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5883         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5884         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5885         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5886         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5887         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5888         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5889         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5890         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5891         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5892         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5893         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5894         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5895         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5896         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5897         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5898         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5899         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5900         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5901         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5902         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5903         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5904         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5905         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5906         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5907         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5908         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5909         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5910         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5911         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5912         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5913         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5914         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5915         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5916         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5917         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5918         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5919         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5920         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5921         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5922         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5923         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5924         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5925         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5926         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5927         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5928         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5929         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5930         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5931         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5932         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5933         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5934         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5935         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5936         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5937         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5938         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5939         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5940         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5941         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5942         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5943         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5944         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5945         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5946         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5947         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5948         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5949         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5950         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5951         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5952         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5953         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5954         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5955         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5956         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5957         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5958         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5959         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5960         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5961         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5962         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5963         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5964         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5965         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5966         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5967         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5968         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5969         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5970         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5971         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5972         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5973         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5974         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5975         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5976         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5977         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5978         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5979         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5980         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5981         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5982         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5983         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5984         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5985         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5986         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5987         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5988         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5989         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5990         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5991         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5992         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5993         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5994         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5995         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5996         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5997         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5998         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5999         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
6000         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
6001         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
6002         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
6003         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6004         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
6005         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
6006         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
6007         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
6008         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
6009         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
6010         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
6011         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
6012         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
6013         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
6014         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
6015         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
6016         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
6017         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
6018         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
6019         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
6020         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
6021         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
6022         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
6023 };
6024
/* Read-only data segment of the standard TSO firmware image, loaded at
 * TG3_TSO_FW_RODATA_ADDR.  The words are ASCII tags used by the firmware
 * ("MainCpuB", "MainCpuA", "stkoffldIn", "stkoff**", "SwEvent0",
 * "fatalErr").  Opaque firmware data -- do not modify.
 */
static const u32 tg3TsoFwRodata[] = {
        0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
        0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
        0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
        0x00000000,
};
6032
/* Initialized-data segment of the standard TSO firmware image, loaded at
 * TG3_TSO_FW_DATA_ADDR.  Contains the firmware version tag
 * "stkoffld_v1.6.0".  Opaque firmware data -- do not modify.
 */
static const u32 tg3TsoFwData[] = {
        0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000,
};
6038
/* 5705 needs a special version of the TSO firmware.  */
/*
 * Image layout in NIC memory, segments laid out from 0x00010000:
 *   text   @ 0x00010000, 0xe90 bytes
 *   rodata @ 0x00010e90, 0x50 bytes
 *   data   @ 0x00010f00, 0x20 bytes
 *   sbss   @ 0x00010f20, 0x28 bytes (zeroed by scratch-size accounting)
 *   bss    @ 0x00010f50, 0x88 bytes
 */
#define TG3_TSO5_FW_RELEASE_MAJOR       0x1
#define TG3_TSO5_FW_RELASE_MINOR        0x2     /* sic: historical typo, kept for compatibility */
#define TG3_TSO5_FW_RELEASE_FIX         0x0
#define TG3_TSO5_FW_START_ADDR          0x00010000
#define TG3_TSO5_FW_TEXT_ADDR           0x00010000
#define TG3_TSO5_FW_TEXT_LEN            0xe90
#define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
#define TG3_TSO5_FW_RODATA_LEN          0x50
#define TG3_TSO5_FW_DATA_ADDR           0x00010f00
#define TG3_TSO5_FW_DATA_LEN            0x20
#define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
#define TG3_TSO5_FW_SBSS_LEN            0x28
#define TG3_TSO5_FW_BSS_ADDR            0x00010f50
#define TG3_TSO5_FW_BSS_LEN             0x88
6054
/* Text segment of the 5705 TSO firmware image, loaded at
 * TG3_TSO5_FW_TEXT_ADDR and run on the RX CPU.  The words appear to be
 * MIPS-style machine code for the on-chip processor (TODO confirm ISA).
 * Opaque firmware blob -- every word must be preserved exactly; never
 * edit by hand.  Array is sized for TEXT_LEN words plus one terminator.
 */
static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
        0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
        0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
        0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
        0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
        0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
        0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
        0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
        0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
        0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
        0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
        0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
        0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
        0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
        0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
        0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
        0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
        0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
        0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
        0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
        0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
        0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
        0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
        0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
        0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
        0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
        0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
        0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
        0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
        0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
        0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
        0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
        0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
        0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
        0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
        0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
        0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
        0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
        0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
        0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
        0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
        0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
        0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
        0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
        0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
        0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
        0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
        0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
        0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
        0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
        0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
        0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
        0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
        0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
        0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
        0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
        0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
        0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
        0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
        0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
        0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
        0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
        0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
        0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
        0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
        0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
        0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
        0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
        0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
        0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
        0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
        0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
        0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
        0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
        0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
        0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
        0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
        0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
        0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
        0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
        0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
        0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
        0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
        0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
        0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
        0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
        0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
        0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
        0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
        0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
        0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
        0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
        0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
        0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
        0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
        0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
        0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
        0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
        0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
        0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
        0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
        0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
        0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
        0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
        0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
        0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
        0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
        0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
        0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
        0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
        0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
        0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
        0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
        0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
        0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
        0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
        0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
        0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
        0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
        0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
        0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
        0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
        0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
        0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
        0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
        0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
        0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
        0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
        0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
        0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
        0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
        0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
        0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
        0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
        0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
        0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
        0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
        0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
        0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
        0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
        0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
        0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
        0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
        0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
        0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
        0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
        0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
        0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
        0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
        0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
        0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
        0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
        0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
        0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
        0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
        0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
        0x00000000, 0x00000000, 0x00000000,
};
6213
/* Read-only data segment of the 5705 TSO firmware image, loaded at
 * TG3_TSO5_FW_RODATA_ADDR.  The words are ASCII tags used by the
 * firmware ("MainCpuB", "MainCpuA", "stkoffld", "fatalErr").
 * Opaque firmware data -- do not modify.
 */
static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
        0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
        0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
        0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
        0x00000000, 0x00000000, 0x00000000,
};
6220
/* Initialized-data segment of the 5705 TSO firmware image, loaded at
 * TG3_TSO5_FW_DATA_ADDR.  Contains the firmware version tag
 * "stkoffld_v1.2.0".  Opaque firmware data -- do not modify.
 */
static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
        0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000,
};
6225
/* tp->lock is held. */
/*
 * Load the TSO firmware image into the appropriate on-chip CPU and
 * start it running.  Returns 0 on success, a negative errno from the
 * firmware-load helper, or -ENODEV if the CPU refuses to take the new
 * program counter.
 */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
        struct fw_info info;
        unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
        int err, i;

        /* Chips with hardware TSO do not need the firmware image at all. */
        if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
                return 0;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                /* 5705: TSO runs on the RX CPU; the MBUF pool serves as
                 * scratch space sized to cover every firmware segment.
                 */
                info.text_base = TG3_TSO5_FW_TEXT_ADDR;
                info.text_len = TG3_TSO5_FW_TEXT_LEN;
                info.text_data = &tg3Tso5FwText[0];
                info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
                info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
                info.rodata_data = &tg3Tso5FwRodata[0];
                info.data_base = TG3_TSO5_FW_DATA_ADDR;
                info.data_len = TG3_TSO5_FW_DATA_LEN;
                info.data_data = &tg3Tso5FwData[0];
                cpu_base = RX_CPU_BASE;
                cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
                cpu_scratch_size = (info.text_len +
                                    info.rodata_len +
                                    info.data_len +
                                    TG3_TSO5_FW_SBSS_LEN +
                                    TG3_TSO5_FW_BSS_LEN);
        } else {
                /* All other chips: TSO runs on the TX CPU with its own
                 * dedicated scratch area.
                 */
                info.text_base = TG3_TSO_FW_TEXT_ADDR;
                info.text_len = TG3_TSO_FW_TEXT_LEN;
                info.text_data = &tg3TsoFwText[0];
                info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
                info.rodata_len = TG3_TSO_FW_RODATA_LEN;
                info.rodata_data = &tg3TsoFwRodata[0];
                info.data_base = TG3_TSO_FW_DATA_ADDR;
                info.data_len = TG3_TSO_FW_DATA_LEN;
                info.data_data = &tg3TsoFwData[0];
                cpu_base = TX_CPU_BASE;
                cpu_scratch_base = TX_CPU_SCRATCH_BASE;
                cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
        }

        err = tg3_load_firmware_cpu(tp, cpu_base,
                                    cpu_scratch_base, cpu_scratch_size,
                                    &info);
        if (err)
                return err;

        /* Now startup the cpu. */
        tw32(cpu_base + CPU_STATE, 0xffffffff);
        tw32_f(cpu_base + CPU_PC,    info.text_base);

        /* The CPU may not latch the new PC on the first try; retry a few
         * times (halting it each time) before declaring the device dead.
         */
        for (i = 0; i < 5; i++) {
                if (tr32(cpu_base + CPU_PC) == info.text_base)
                        break;
                tw32(cpu_base + CPU_STATE, 0xffffffff);
                tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
                tw32_f(cpu_base + CPU_PC,    info.text_base);
                udelay(1000);
        }
        if (i >= 5) {
                printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
                       "to set CPU PC, is %08x should be %08x\n",
                       tp->dev->name, tr32(cpu_base + CPU_PC),
                       info.text_base);
                return -ENODEV;
        }
        /* Clear the halt bit so the CPU starts executing the firmware. */
        tw32(cpu_base + CPU_STATE, 0xffffffff);
        tw32_f(cpu_base + CPU_MODE,  0x00000000);
        return 0;
}
6297
6298
6299 /* tp->lock is held. */
6300 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
6301 {
6302         u32 addr_high, addr_low;
6303         int i;
6304
6305         addr_high = ((tp->dev->dev_addr[0] << 8) |
6306                      tp->dev->dev_addr[1]);
6307         addr_low = ((tp->dev->dev_addr[2] << 24) |
6308                     (tp->dev->dev_addr[3] << 16) |
6309                     (tp->dev->dev_addr[4] <<  8) |
6310                     (tp->dev->dev_addr[5] <<  0));
6311         for (i = 0; i < 4; i++) {
6312                 if (i == 1 && skip_mac_1)
6313                         continue;
6314                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
6315                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
6316         }
6317
6318         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
6319             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6320                 for (i = 0; i < 12; i++) {
6321                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
6322                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
6323                 }
6324         }
6325
6326         addr_high = (tp->dev->dev_addr[0] +
6327                      tp->dev->dev_addr[1] +
6328                      tp->dev->dev_addr[2] +
6329                      tp->dev->dev_addr[3] +
6330                      tp->dev->dev_addr[4] +
6331                      tp->dev->dev_addr[5]) &
6332                 TX_BACKOFF_SEED_MASK;
6333         tw32(MAC_TX_BACKOFF_SEED, addr_high);
6334 }
6335
6336 static int tg3_set_mac_addr(struct net_device *dev, void *p)
6337 {
6338         struct tg3 *tp = netdev_priv(dev);
6339         struct sockaddr *addr = p;
6340         int err = 0, skip_mac_1 = 0;
6341
6342         if (!is_valid_ether_addr(addr->sa_data))
6343                 return -EINVAL;
6344
6345         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6346
6347         if (!netif_running(dev))
6348                 return 0;
6349
6350         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6351                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
6352
6353                 addr0_high = tr32(MAC_ADDR_0_HIGH);
6354                 addr0_low = tr32(MAC_ADDR_0_LOW);
6355                 addr1_high = tr32(MAC_ADDR_1_HIGH);
6356                 addr1_low = tr32(MAC_ADDR_1_LOW);
6357
6358                 /* Skip MAC addr 1 if ASF is using it. */
6359                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
6360                     !(addr1_high == 0 && addr1_low == 0))
6361                         skip_mac_1 = 1;
6362         }
6363         spin_lock_bh(&tp->lock);
6364         __tg3_set_mac_addr(tp, skip_mac_1);
6365         spin_unlock_bh(&tp->lock);
6366
6367         return err;
6368 }
6369
6370 /* tp->lock is held. */
6371 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
6372                            dma_addr_t mapping, u32 maxlen_flags,
6373                            u32 nic_addr)
6374 {
6375         tg3_write_mem(tp,
6376                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
6377                       ((u64) mapping >> 32));
6378         tg3_write_mem(tp,
6379                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
6380                       ((u64) mapping & 0xffffffff));
6381         tg3_write_mem(tp,
6382                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
6383                        maxlen_flags);
6384
6385         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6386                 tg3_write_mem(tp,
6387                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
6388                               nic_addr);
6389 }
6390
6391 static void __tg3_set_rx_mode(struct net_device *);
6392 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
6393 {
6394         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
6395         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
6396         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
6397         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
6398         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6399                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
6400                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
6401         }
6402         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
6403         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
6404         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6405                 u32 val = ec->stats_block_coalesce_usecs;
6406
6407                 if (!netif_carrier_ok(tp->dev))
6408                         val = 0;
6409
6410                 tw32(HOSTCC_STAT_COAL_TICKS, val);
6411         }
6412 }
6413
6414 /* tp->lock is held. */
6415 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6416 {
6417         u32 val, rdmac_mode;
6418         int i, err, limit;
6419
6420         tg3_disable_ints(tp);
6421
6422         tg3_stop_fw(tp);
6423
6424         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
6425
6426         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
6427                 tg3_abort_hw(tp, 1);
6428         }
6429
6430         if (reset_phy)
6431                 tg3_phy_reset(tp);
6432
6433         err = tg3_chip_reset(tp);
6434         if (err)
6435                 return err;
6436
6437         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
6438
6439         if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
6440             tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
6441                 val = tr32(TG3_CPMU_CTRL);
6442                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
6443                 tw32(TG3_CPMU_CTRL, val);
6444
6445                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
6446                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
6447                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
6448                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
6449
6450                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
6451                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
6452                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
6453                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
6454
6455                 val = tr32(TG3_CPMU_HST_ACC);
6456                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
6457                 val |= CPMU_HST_ACC_MACCLK_6_25;
6458                 tw32(TG3_CPMU_HST_ACC, val);
6459         }
6460
6461         /* This works around an issue with Athlon chipsets on
6462          * B3 tigon3 silicon.  This bit has no effect on any
6463          * other revision.  But do not set this on PCI Express
6464          * chips and don't even touch the clocks if the CPMU is present.
6465          */
6466         if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
6467                 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
6468                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
6469                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6470         }
6471
6472         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6473             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
6474                 val = tr32(TG3PCI_PCISTATE);
6475                 val |= PCISTATE_RETRY_SAME_DMA;
6476                 tw32(TG3PCI_PCISTATE, val);
6477         }
6478
6479         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6480                 /* Allow reads and writes to the
6481                  * APE register and memory space.
6482                  */
6483                 val = tr32(TG3PCI_PCISTATE);
6484                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
6485                        PCISTATE_ALLOW_APE_SHMEM_WR;
6486                 tw32(TG3PCI_PCISTATE, val);
6487         }
6488
6489         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
6490                 /* Enable some hw fixes.  */
6491                 val = tr32(TG3PCI_MSI_DATA);
6492                 val |= (1 << 26) | (1 << 28) | (1 << 29);
6493                 tw32(TG3PCI_MSI_DATA, val);
6494         }
6495
6496         /* Descriptor ring init may make accesses to the
6497          * NIC SRAM area to setup the TX descriptors, so we
6498          * can only do this after the hardware has been
6499          * successfully reset.
6500          */
6501         err = tg3_init_rings(tp);
6502         if (err)
6503                 return err;
6504
6505         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
6506             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
6507                 /* This value is determined during the probe time DMA
6508                  * engine test, tg3_test_dma.
6509                  */
6510                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
6511         }
6512
6513         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
6514                           GRC_MODE_4X_NIC_SEND_RINGS |
6515                           GRC_MODE_NO_TX_PHDR_CSUM |
6516                           GRC_MODE_NO_RX_PHDR_CSUM);
6517         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
6518
6519         /* Pseudo-header checksum is done by hardware logic and not
6520          * the offload processers, so make the chip do the pseudo-
6521          * header checksums on receive.  For transmit it is more
6522          * convenient to do the pseudo-header checksum in software
6523          * as Linux does that on transmit for us in all cases.
6524          */
6525         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
6526
6527         tw32(GRC_MODE,
6528              tp->grc_mode |
6529              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
6530
6531         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
6532         val = tr32(GRC_MISC_CFG);
6533         val &= ~0xff;
6534         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
6535         tw32(GRC_MISC_CFG, val);
6536
6537         /* Initialize MBUF/DESC pool. */
6538         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6539                 /* Do nothing.  */
6540         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
6541                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
6542                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
6543                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
6544                 else
6545                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
6546                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6547                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
6548         }
6549         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6550                 int fw_len;
6551
6552                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
6553                           TG3_TSO5_FW_RODATA_LEN +
6554                           TG3_TSO5_FW_DATA_LEN +
6555                           TG3_TSO5_FW_SBSS_LEN +
6556                           TG3_TSO5_FW_BSS_LEN);
6557                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
6558                 tw32(BUFMGR_MB_POOL_ADDR,
6559                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
6560                 tw32(BUFMGR_MB_POOL_SIZE,
6561                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
6562         }
6563
6564         if (tp->dev->mtu <= ETH_DATA_LEN) {
6565                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6566                      tp->bufmgr_config.mbuf_read_dma_low_water);
6567                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6568                      tp->bufmgr_config.mbuf_mac_rx_low_water);
6569                 tw32(BUFMGR_MB_HIGH_WATER,
6570                      tp->bufmgr_config.mbuf_high_water);
6571         } else {
6572                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6573                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6574                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6575                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6576                 tw32(BUFMGR_MB_HIGH_WATER,
6577                      tp->bufmgr_config.mbuf_high_water_jumbo);
6578         }
6579         tw32(BUFMGR_DMA_LOW_WATER,
6580              tp->bufmgr_config.dma_low_water);
6581         tw32(BUFMGR_DMA_HIGH_WATER,
6582              tp->bufmgr_config.dma_high_water);
6583
6584         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
6585         for (i = 0; i < 2000; i++) {
6586                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6587                         break;
6588                 udelay(10);
6589         }
6590         if (i >= 2000) {
6591                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6592                        tp->dev->name);
6593                 return -ENODEV;
6594         }
6595
6596         /* Setup replenish threshold. */
6597         val = tp->rx_pending / 8;
6598         if (val == 0)
6599                 val = 1;
6600         else if (val > tp->rx_std_max_post)
6601                 val = tp->rx_std_max_post;
6602         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6603                 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
6604                         tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
6605
6606                 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
6607                         val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
6608         }
6609
6610         tw32(RCVBDI_STD_THRESH, val);
6611
6612         /* Initialize TG3_BDINFO's at:
6613          *  RCVDBDI_STD_BD:     standard eth size rx ring
6614          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
6615          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
6616          *
6617          * like so:
6618          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
6619          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
6620          *                              ring attribute flags
6621          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
6622          *
6623          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6624          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6625          *
6626          * The size of each ring is fixed in the firmware, but the location is
6627          * configurable.
6628          */
6629         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6630              ((u64) tp->rx_std_mapping >> 32));
6631         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6632              ((u64) tp->rx_std_mapping & 0xffffffff));
6633         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6634              NIC_SRAM_RX_BUFFER_DESC);
6635
6636         /* Don't even try to program the JUMBO/MINI buffer descriptor
6637          * configs on 5705.
6638          */
6639         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6640                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6641                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6642         } else {
6643                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6644                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6645
6646                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6647                      BDINFO_FLAGS_DISABLED);
6648
6649                 /* Setup replenish threshold. */
6650                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6651
6652                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
6653                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6654                              ((u64) tp->rx_jumbo_mapping >> 32));
6655                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6656                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6657                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6658                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6659                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6660                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6661                 } else {
6662                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6663                              BDINFO_FLAGS_DISABLED);
6664                 }
6665
6666         }
6667
6668         /* There is only one send ring on 5705/5750, no need to explicitly
6669          * disable the others.
6670          */
6671         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6672                 /* Clear out send RCB ring in SRAM. */
6673                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6674                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6675                                       BDINFO_FLAGS_DISABLED);
6676         }
6677
6678         tp->tx_prod = 0;
6679         tp->tx_cons = 0;
6680         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6681         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6682
6683         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6684                        tp->tx_desc_mapping,
6685                        (TG3_TX_RING_SIZE <<
6686                         BDINFO_FLAGS_MAXLEN_SHIFT),
6687                        NIC_SRAM_TX_BUFFER_DESC);
6688
6689         /* There is only one receive return ring on 5705/5750, no need
6690          * to explicitly disable the others.
6691          */
6692         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6693                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6694                      i += TG3_BDINFO_SIZE) {
6695                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6696                                       BDINFO_FLAGS_DISABLED);
6697                 }
6698         }
6699
6700         tp->rx_rcb_ptr = 0;
6701         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6702
6703         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6704                        tp->rx_rcb_mapping,
6705                        (TG3_RX_RCB_RING_SIZE(tp) <<
6706                         BDINFO_FLAGS_MAXLEN_SHIFT),
6707                        0);
6708
6709         tp->rx_std_ptr = tp->rx_pending;
6710         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6711                      tp->rx_std_ptr);
6712
6713         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
6714                                                 tp->rx_jumbo_pending : 0;
6715         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6716                      tp->rx_jumbo_ptr);
6717
6718         /* Initialize MAC address and backoff seed. */
6719         __tg3_set_mac_addr(tp, 0);
6720
6721         /* MTU + ethernet header + FCS + optional VLAN tag */
6722         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6723
6724         /* The slot time is changed by tg3_setup_phy if we
6725          * run at gigabit with half duplex.
6726          */
6727         tw32(MAC_TX_LENGTHS,
6728              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6729              (6 << TX_LENGTHS_IPG_SHIFT) |
6730              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6731
6732         /* Receive rules. */
6733         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6734         tw32(RCVLPC_CONFIG, 0x0181);
6735
6736         /* Calculate RDMAC_MODE setting early, we need it to determine
6737          * the RCVLPC_STATE_ENABLE mask.
6738          */
6739         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6740                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6741                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6742                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6743                       RDMAC_MODE_LNGREAD_ENAB);
6744
6745         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
6746                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
6747                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
6748                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
6749
6750         /* If statement applies to 5705 and 5750 PCI devices only */
6751         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6752              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6753             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
6754                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6755                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6756                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6757                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6758                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6759                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6760                 }
6761         }
6762
6763         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6764                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6765
6766         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6767                 rdmac_mode |= (1 << 27);
6768
6769         /* Receive/send statistics. */
6770         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6771                 val = tr32(RCVLPC_STATS_ENABLE);
6772                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
6773                 tw32(RCVLPC_STATS_ENABLE, val);
6774         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6775                    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6776                 val = tr32(RCVLPC_STATS_ENABLE);
6777                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6778                 tw32(RCVLPC_STATS_ENABLE, val);
6779         } else {
6780                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6781         }
6782         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6783         tw32(SNDDATAI_STATSENAB, 0xffffff);
6784         tw32(SNDDATAI_STATSCTRL,
6785              (SNDDATAI_SCTRL_ENABLE |
6786               SNDDATAI_SCTRL_FASTUPD));
6787
6788         /* Setup host coalescing engine. */
6789         tw32(HOSTCC_MODE, 0);
6790         for (i = 0; i < 2000; i++) {
6791                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6792                         break;
6793                 udelay(10);
6794         }
6795
6796         __tg3_set_coalesce(tp, &tp->coal);
6797
6798         /* set status block DMA address */
6799         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6800              ((u64) tp->status_mapping >> 32));
6801         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6802              ((u64) tp->status_mapping & 0xffffffff));
6803
6804         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6805                 /* Status/statistics block address.  See tg3_timer,
6806                  * the tg3_periodic_fetch_stats call there, and
6807                  * tg3_get_stats to see how this works for 5705/5750 chips.
6808                  */
6809                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6810                      ((u64) tp->stats_mapping >> 32));
6811                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6812                      ((u64) tp->stats_mapping & 0xffffffff));
6813                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6814                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6815         }
6816
6817         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6818
6819         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6820         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6821         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6822                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6823
6824         /* Clear statistics/status block in chip, and status block in ram. */
6825         for (i = NIC_SRAM_STATS_BLK;
6826              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6827              i += sizeof(u32)) {
6828                 tg3_write_mem(tp, i, 0);
6829                 udelay(40);
6830         }
6831         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6832
6833         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6834                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6835                 /* reset to prevent losing 1st rx packet intermittently */
6836                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6837                 udelay(10);
6838         }
6839
6840         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6841                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6842         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6843             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6844             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
6845                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
6846         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6847         udelay(40);
6848
6849         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6850          * If TG3_FLG2_IS_NIC is zero, we should read the
6851          * register to preserve the GPIO settings for LOMs. The GPIOs,
6852          * whether used as inputs or outputs, are set by boot code after
6853          * reset.
6854          */
6855         if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
6856                 u32 gpio_mask;
6857
6858                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
6859                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
6860                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
6861
6862                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6863                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6864                                      GRC_LCLCTRL_GPIO_OUTPUT3;
6865
6866                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6867                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6868
6869                 tp->grc_local_ctrl &= ~gpio_mask;
6870                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6871
6872                 /* GPIO1 must be driven high for eeprom write protect */
6873                 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
6874                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6875                                                GRC_LCLCTRL_GPIO_OUTPUT1);
6876         }
6877         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6878         udelay(100);
6879
6880         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
6881         tp->last_tag = 0;
6882
6883         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6884                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6885                 udelay(40);
6886         }
6887
6888         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6889                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6890                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6891                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6892                WDMAC_MODE_LNGREAD_ENAB);
6893
6894         /* If statement applies to 5705 and 5750 PCI devices only */
6895         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6896              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6897             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6898                 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
6899                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6900                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6901                         /* nothing */
6902                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6903                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6904                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6905                         val |= WDMAC_MODE_RX_ACCEL;
6906                 }
6907         }
6908
6909         /* Enable host coalescing bug fix */
6910         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
6911             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
6912             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
6913             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761))
6914                 val |= (1 << 29);
6915
6916         tw32_f(WDMAC_MODE, val);
6917         udelay(40);
6918
6919         if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
6920                 u16 pcix_cmd;
6921
6922                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6923                                      &pcix_cmd);
6924                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6925                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
6926                         pcix_cmd |= PCI_X_CMD_READ_2K;
6927                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6928                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
6929                         pcix_cmd |= PCI_X_CMD_READ_2K;
6930                 }
6931                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6932                                       pcix_cmd);
6933         }
6934
6935         tw32_f(RDMAC_MODE, rdmac_mode);
6936         udelay(40);
6937
6938         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6939         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6940                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6941
6942         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
6943                 tw32(SNDDATAC_MODE,
6944                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
6945         else
6946                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6947
6948         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6949         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6950         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6951         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6952         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6953                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6954         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6955         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6956
6957         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6958                 err = tg3_load_5701_a0_firmware_fix(tp);
6959                 if (err)
6960                         return err;
6961         }
6962
6963         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6964                 err = tg3_load_tso_firmware(tp);
6965                 if (err)
6966                         return err;
6967         }
6968
6969         tp->tx_mode = TX_MODE_ENABLE;
6970         tw32_f(MAC_TX_MODE, tp->tx_mode);
6971         udelay(100);
6972
6973         tp->rx_mode = RX_MODE_ENABLE;
6974         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
6975             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
6976                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
6977
6978         tw32_f(MAC_RX_MODE, tp->rx_mode);
6979         udelay(10);
6980
6981         if (tp->link_config.phy_is_low_power) {
6982                 tp->link_config.phy_is_low_power = 0;
6983                 tp->link_config.speed = tp->link_config.orig_speed;
6984                 tp->link_config.duplex = tp->link_config.orig_duplex;
6985                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6986         }
6987
6988         tp->mi_mode = MAC_MI_MODE_BASE;
6989         tw32_f(MAC_MI_MODE, tp->mi_mode);
6990         udelay(80);
6991
6992         tw32(MAC_LED_CTRL, tp->led_ctrl);
6993
6994         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
6995         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6996                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6997                 udelay(10);
6998         }
6999         tw32_f(MAC_RX_MODE, tp->rx_mode);
7000         udelay(10);
7001
7002         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7003                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
7004                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
7005                         /* Set drive transmission level to 1.2V  */
7006                         /* only if the signal pre-emphasis bit is not set  */
7007                         val = tr32(MAC_SERDES_CFG);
7008                         val &= 0xfffff000;
7009                         val |= 0x880;
7010                         tw32(MAC_SERDES_CFG, val);
7011                 }
7012                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
7013                         tw32(MAC_SERDES_CFG, 0x616000);
7014         }
7015
7016         /* Prevent chip from dropping frames when flow control
7017          * is enabled.
7018          */
7019         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
7020
7021         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7022             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
7023                 /* Use hardware link auto-negotiation */
7024                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
7025         }
7026
7027         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
7028             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
7029                 u32 tmp;
7030
7031                 tmp = tr32(SERDES_RX_CTRL);
7032                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
7033                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
7034                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
7035                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7036         }
7037
7038         err = tg3_setup_phy(tp, 0);
7039         if (err)
7040                 return err;
7041
7042         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7043             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
7044                 u32 tmp;
7045
7046                 /* Clear CRC stats. */
7047                 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
7048                         tg3_writephy(tp, MII_TG3_TEST1,
7049                                      tmp | MII_TG3_TEST1_CRC_EN);
7050                         tg3_readphy(tp, 0x14, &tmp);
7051                 }
7052         }
7053
7054         __tg3_set_rx_mode(tp->dev);
7055
7056         /* Initialize receive rules. */
7057         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
7058         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
7059         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
7060         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
7061
7062         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7063             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
7064                 limit = 8;
7065         else
7066                 limit = 16;
7067         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
7068                 limit -= 4;
7069         switch (limit) {
7070         case 16:
7071                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
7072         case 15:
7073                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
7074         case 14:
7075                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
7076         case 13:
7077                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
7078         case 12:
7079                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
7080         case 11:
7081                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
7082         case 10:
7083                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
7084         case 9:
7085                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
7086         case 8:
7087                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
7088         case 7:
7089                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
7090         case 6:
7091                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
7092         case 5:
7093                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
7094         case 4:
7095                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
7096         case 3:
7097                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
7098         case 2:
7099         case 1:
7100
7101         default:
7102                 break;
7103         };
7104
7105         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7106                 /* Write our heartbeat update interval to APE. */
7107                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7108                                 APE_HOST_HEARTBEAT_INT_DISABLE);
7109
7110         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7111
7112         return 0;
7113 }
7114
7115 /* Called at device open time to get the chip ready for
7116  * packet processing.  Invoked with tp->lock held.
7117  */
7118 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
7119 {
7120         int err;
7121
7122         /* Force the chip into D0. */
7123         err = tg3_set_power_state(tp, PCI_D0);
7124         if (err)
7125                 goto out;
7126
7127         tg3_switch_clocks(tp);
7128
7129         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7130
7131         err = tg3_reset_hw(tp, reset_phy);
7132
7133 out:
7134         return err;
7135 }
7136
/* Accumulate the 32-bit value read from hardware register REG into the
 * 64-bit {high,low} statistics counter PSTAT, carrying into ->high when
 * ->low wraps (detected by the post-add value being smaller than the
 * addend).
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
7143
/* Fold the MAC TX/RX and receive-list-placement hardware statistics
 * registers into the driver's 64-bit hw_stats block.  Does nothing
 * while the link is down.  Called once per second from tg3_timer with
 * tp->lock held.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!netif_carrier_ok(tp->dev))
		return;

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
7184
/* Watchdog timer callback; re-arms itself every tp->timer_offset
 * jiffies.  Handles the non-tagged-status interrupt race workaround
 * on every tick, link polling and statistics fetching once per
 * second, and the ASF firmware heartbeat once every 2 seconds.
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	/* Interrupts are being synchronized (device quiescing); do not
	 * touch the hardware, just re-arm and retry on the next tick.
	 */
	if (tp->irq_sync)
		goto restart_timer;

	spin_lock(&tp->lock);

	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->hw_status->status & SD_STATUS_UPDATED) {
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
		}

		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			/* Write DMA engine is no longer enabled — flag a
			 * restart and schedule a full chip reset from
			 * process context.
			 */
			tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
			spin_unlock(&tp->lock);
			schedule_work(&tp->reset_task);
			return;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
			tg3_periodic_fetch_stats(tp);

		if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (! netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					/* Clear the port-mode bits briefly,
					 * then restore them, before
					 * re-running PHY setup.
					 */
					tw32_f(MAC_MODE,
					     (tp->mac_mode &
					      ~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_serdes_parallel_detect(tp);

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
			u32 val;

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			/* 5 seconds timeout */
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
			val = tr32(GRC_RX_CPU_EVENT);
			/* NOTE(review): bit 14 presumably notifies the RX
			 * CPU that a firmware command is pending — no
			 * symbolic name is visible here; confirm.
			 */
			val |= (1 << 14);
			tw32(GRC_RX_CPU_EVENT, val);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
7304
7305 static int tg3_request_irq(struct tg3 *tp)
7306 {
7307         irq_handler_t fn;
7308         unsigned long flags;
7309         struct net_device *dev = tp->dev;
7310
7311         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7312                 fn = tg3_msi;
7313                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
7314                         fn = tg3_msi_1shot;
7315                 flags = IRQF_SAMPLE_RANDOM;
7316         } else {
7317                 fn = tg3_interrupt;
7318                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7319                         fn = tg3_interrupt_tagged;
7320                 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
7321         }
7322         return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
7323 }
7324
/* Verify that the chip can actually deliver an interrupt: temporarily
 * install tg3_test_isr, force a host-coalescing "now" event, and poll
 * for up to ~50ms for evidence that the handler ran.  The normal
 * handler is reinstalled before returning.  Returns 0 on success,
 * -EIO if no interrupt was observed, or another negative errno if an
 * irq request fails.
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	free_irq(tp->pdev->irq, dev);

	err = request_irq(tp->pdev->irq, tg3_test_isr,
			  IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
	if (err)
		return err;

	tp->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	/* Kick the host coalescing engine so it generates an interrupt
	 * immediately.
	 */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       HOSTCC_MODE_NOW);

	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
					TG3_64BIT_REG_LOW);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		/* A non-zero interrupt mailbox or a masked PCI interrupt
		 * is taken as proof the interrupt was delivered.
		 */
		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		msleep(10);
	}

	tg3_disable_ints(tp);

	free_irq(tp->pdev->irq, dev);

	/* Put the normal production handler back in place. */
	err = tg3_request_irq(tp);

	if (err)
		return err;

	if (intr_ok)
		return 0;

	return -EIO;
}
7378
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored.  Any other error leaves the device without a
 * registered interrupt handler.
 */
static int tg3_test_msi(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err;
	u16 pci_cmd;

	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the original command word (re-enables SERR). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
	       "switching to INTx mode. Please report this failure to "
	       "the PCI maintainer and include system chipset information.\n",
		       tp->dev->name);

	free_irq(tp->pdev->irq, dev);
	pci_disable_msi(tp->pdev);

	tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;

	err = tg3_request_irq(tp);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->pdev->irq, dev);

	return err;
}
7439
/* net_device open() hook: power the chip into D0, allocate the DMA
 * rings, install the interrupt handler (preferring MSI where
 * supported and verified), initialize the hardware, and start the
 * watchdog timer.  Returns 0 on success or a negative errno with all
 * intermediate state torn down.
 */
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	netif_carrier_off(tp->dev);

	tg3_full_lock(tp, 0);

	/* Force full-power state before touching any registers. */
	err = tg3_set_power_state(tp, PCI_D0);
	if (err) {
		tg3_full_unlock(tp);
		return err;
	}

	tg3_disable_ints(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		return err;

	if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
			printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
			       "Not using MSI.\n", tp->dev->name);
		} else if (pci_enable_msi(tp->pdev) == 0) {
			u32 msi_mode;

			msi_mode = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
			tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
		}
	}
	err = tg3_request_irq(tp);

	if (err) {
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
		}
		tg3_free_consistent(tp);
		return err;
	}

	napi_enable(&tp->napi);

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, 1);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	} else {
		/* Tagged-status chips need only a 1 Hz tick; the
		 * non-tagged race workaround in tg3_timer wants 10 Hz.
		 */
		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
			tp->timer_offset = HZ;
		else
			tp->timer_offset = HZ / 10;

		BUG_ON(tp->timer_offset > HZ);
		tp->timer_counter = tp->timer_multiplier =
			(HZ / tp->timer_offset);
		/* ASF heartbeat interval is twice the 1-second period. */
		tp->asf_counter = tp->asf_multiplier =
			((HZ / tp->timer_offset) * 2);

		init_timer(&tp->timer);
		tp->timer.expires = jiffies + tp->timer_offset;
		tp->timer.data = (unsigned long) tp;
		tp->timer.function = tg3_timer;
	}

	tg3_full_unlock(tp);

	if (err) {
		napi_disable(&tp->napi);
		free_irq(tp->pdev->irq, dev);
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
		}
		tg3_free_consistent(tp);
		return err;
	}

	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		/* Confirm MSI actually delivers; tg3_test_msi falls
		 * back to INTx (returning 0) when possible.
		 */
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);

			if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
				pci_disable_msi(tp->pdev);
				tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
			}
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_free_consistent(tp);

			tg3_full_unlock(tp);

			napi_disable(&tp->napi);

			return err;
		}

		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
				u32 val = tr32(PCIE_TRANSACTION_CFG);

				tw32(PCIE_TRANSACTION_CFG,
				     val | PCIE_TRANS_CFG_1SHOT_MSI);
			}
		}
	}

	tg3_full_lock(tp, 0);

	add_timer(&tp->timer);
	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	tg3_enable_ints(tp);

	tg3_full_unlock(tp);

	netif_start_queue(dev);

	return 0;
}
7575
#if 0
/* Debug helper (compiled out): dump the MAC, send/receive, DMA, host
 * coalescing, memory arbiter, buffer manager and GRC block registers,
 * the on-chip SRAM control/status blocks, the software status and
 * statistics blocks, the mailboxes and the NIC-side descriptor rings
 * to the kernel log.
 */
/*static*/ void tg3_dump_state(struct tg3 *tp)
{
	u32 val32, val32_2, val32_3, val32_4, val32_5;
	u16 val16;
	int i;

	pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
	printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
	       val16, val32);

	/* MAC block */
	printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
	       tr32(MAC_MODE), tr32(MAC_STATUS));
	printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
	       tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
	printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
	       tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
	printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
	       tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));

	/* Send data initiator control block */
	printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
	       tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
	printk("       SNDDATAI_STATSCTRL[%08x]\n",
	       tr32(SNDDATAI_STATSCTRL));

	/* Send data completion control block */
	printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));

	/* Send BD ring selector block */
	printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
	       tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));

	/* Send BD initiator control block */
	printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
	       tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));

	/* Send BD completion control block */
	printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));

	/* Receive list placement control block */
	printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
	       tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
	printk("       RCVLPC_STATSCTRL[%08x]\n",
	       tr32(RCVLPC_STATSCTRL));

	/* Receive data and receive BD initiator control block */
	printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
	       tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));

	/* Receive data completion control block */
	printk("DEBUG: RCVDCC_MODE[%08x]\n",
	       tr32(RCVDCC_MODE));

	/* Receive BD initiator control block */
	printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
	       tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));

	/* Receive BD completion control block */
	printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
	       tr32(RCVCC_MODE), tr32(RCVCC_STATUS));

	/* Receive list selector control block */
	printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
	       tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));

	/* Mbuf cluster free block */
	printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
	       tr32(MBFREE_MODE), tr32(MBFREE_STATUS));

	/* Host coalescing control block */
	printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
	       tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
	printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATS_BLK_NIC_ADDR));
	printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_NIC_ADDR));

	/* Memory arbiter control block */
	printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
	       tr32(MEMARB_MODE), tr32(MEMARB_STATUS));

	/* Buffer manager control block */
	printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
	       tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
	printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
	printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
	       "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_DMA_DESC_POOL_ADDR),
	       tr32(BUFMGR_DMA_DESC_POOL_SIZE));

	/* Read DMA control block */
	printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
	       tr32(RDMAC_MODE), tr32(RDMAC_STATUS));

	/* Write DMA control block */
	printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
	       tr32(WDMAC_MODE), tr32(WDMAC_STATUS));

	/* DMA completion block */
	printk("DEBUG: DMAC_MODE[%08x]\n",
	       tr32(DMAC_MODE));

	/* GRC block */
	printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
	       tr32(GRC_MODE), tr32(GRC_MISC_CFG));
	printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
	       tr32(GRC_LOCAL_CTRL));

	/* TG3_BDINFOs */
	printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_JUMBO_BD + 0x0),
	       tr32(RCVDBDI_JUMBO_BD + 0x4),
	       tr32(RCVDBDI_JUMBO_BD + 0x8),
	       tr32(RCVDBDI_JUMBO_BD + 0xc));
	printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_STD_BD + 0x0),
	       tr32(RCVDBDI_STD_BD + 0x4),
	       tr32(RCVDBDI_STD_BD + 0x8),
	       tr32(RCVDBDI_STD_BD + 0xc));
	printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_MINI_BD + 0x0),
	       tr32(RCVDBDI_MINI_BD + 0x4),
	       tr32(RCVDBDI_MINI_BD + 0x8),
	       tr32(RCVDBDI_MINI_BD + 0xc));

	/* On-chip SRAM ring control blocks and status block. */
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
	printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4, val32_5);

	/* SW status block */
	printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
	       tp->hw_status->status,
	       tp->hw_status->status_tag,
	       tp->hw_status->rx_jumbo_consumer,
	       tp->hw_status->rx_consumer,
	       tp->hw_status->rx_mini_consumer,
	       tp->hw_status->idx[0].rx_producer,
	       tp->hw_status->idx[0].tx_consumer);

	/* SW statistics block */
	printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
	       ((u32 *)tp->hw_stats)[0],
	       ((u32 *)tp->hw_stats)[1],
	       ((u32 *)tp->hw_stats)[2],
	       ((u32 *)tp->hw_stats)[3]);

	/* Mailboxes */
	printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));

	/* NIC side send descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long txd;

		txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
			+ (i * sizeof(struct tg3_tx_buffer_desc));
		printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(txd + 0x0), readl(txd + 0x4),
		       readl(txd + 0x8), readl(txd + 0xc));
	}

	/* NIC side RX descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}

	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}
}
#endif
7803
7804 static struct net_device_stats *tg3_get_stats(struct net_device *);
7805 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7806
/* net_device stop() hook: quiesce NAPI and any pending reset work,
 * stop the watchdog timer, halt the chip, release the IRQ and DMA
 * rings, snapshot the final statistics, and drop into D3hot.
 */
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	napi_disable(&tp->napi);
	/* Ensure a queued reset_task has finished before teardown. */
	cancel_work_sync(&tp->reset_task);

	netif_stop_queue(dev);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
#if 0
	tg3_dump_state(tp);
#endif

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	free_irq(tp->pdev->irq, dev);
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		pci_disable_msi(tp->pdev);
		tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
	}

	/* Snapshot the final counters so they survive ring teardown and
	 * seed the cumulative totals on the next open.
	 */
	memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
	       sizeof(tp->net_stats_prev));
	memcpy(&tp->estats_prev, tg3_get_estats(tp),
	       sizeof(tp->estats_prev));

	tg3_free_consistent(tp);

	tg3_set_power_state(tp, PCI_D3hot);

	netif_carrier_off(tp->dev);

	return 0;
}
7850
7851 static inline unsigned long get_stat64(tg3_stat64_t *val)
7852 {
7853         unsigned long ret;
7854
7855 #if (BITS_PER_LONG == 32)
7856         ret = val->low;
7857 #else
7858         ret = ((u64)val->high << 32) | ((u64)val->low);
7859 #endif
7860         return ret;
7861 }
7862
/* Return the cumulative receive CRC error count.  On 5700/5701 with a
 * copper PHY the count is accumulated in software from the PHY's own
 * counter (read via MII with CRC counting enabled); all other chips
 * report it straight from the hardware statistics block.
 */
static unsigned long calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		spin_lock_bh(&tp->lock);
		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			/* PHY register 0x14 — presumably the CRC error
			 * counter; no symbolic name is defined here.
			 */
			tg3_readphy(tp, 0x14, &val);
		} else
			val = 0;
		spin_unlock_bh(&tp->lock);

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
7888
/* Add one hardware counter on top of the snapshot taken at last close
 * (old_estats), so ethtool statistics stay monotonic across device
 * down/up cycles.
 */
#define ESTAT_ADD(member) \
	estats->member =	old_estats->member + \
				get_stat64(&hw_stats->member)

/* Refresh and return the driver's ethtool statistics block (tp->estats).
 * Each field is the pre-close snapshot plus the live hardware counter.
 * While the hardware statistics block is unmapped, the previous
 * snapshot is returned unchanged.
 */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
	struct tg3_ethtool_stats *estats = &tp->estats;
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_estats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	return estats;
}
7980
/* netdev get_stats hook: fill tp->net_stats from the hardware
 * statistics block, adding the snapshot saved at last close
 * (net_stats_prev) so counters stay monotonic across down/up cycles.
 * While the hardware stats block is unmapped, the snapshot alone is
 * returned.
 */
static struct net_device_stats *tg3_get_stats(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	struct net_device_stats *stats = &tp->net_stats;
	struct net_device_stats *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_stats;

	/* Packet totals are the sum of the unicast, multicast and
	 * broadcast hardware counters.
	 */
	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* CRC errors may come from the PHY on 5700/5701 copper parts;
	 * see calc_crc_errors().
	 */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	return stats;
}
8040
8041 static inline u32 calc_crc(unsigned char *buf, int len)
8042 {
8043         u32 reg;
8044         u32 tmp;
8045         int j, k;
8046
8047         reg = 0xffffffff;
8048
8049         for (j = 0; j < len; j++) {
8050                 reg ^= buf[j];
8051
8052                 for (k = 0; k < 8; k++) {
8053                         tmp = reg & 0x01;
8054
8055                         reg >>= 1;
8056
8057                         if (tmp) {
8058                                 reg ^= 0xedb88320;
8059                         }
8060                 }
8061         }
8062
8063         return ~reg;
8064 }
8065
8066 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8067 {
8068         /* accept or reject all multicast frames */
8069         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8070         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8071         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8072         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8073 }
8074
/* Program the MAC's RX filtering from dev->flags and the multicast
 * list.  Caller must hold the full lock (see tg3_set_rx_mode()).
 *
 * Promiscuous and all-multicast map to hardware modes directly;
 * otherwise each multicast address is hashed via CRC-32 into one bit
 * of the four 32-bit MAC_HASH registers.
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
#if TG3_VLAN_TAG_USED
	if (!tp->vlgrp &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#else
	/* By definition, VLAN is disabled always in this
	 * case.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi (tp, 1);
	} else if (dev->mc_count < 1) {
		/* Reject all multicast. */
		tg3_set_multi (tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		unsigned int i;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* Low 7 bits of the inverted CRC select the
			 * filter bit: bits 6:5 pick the register,
			 * bits 4:0 the bit within it.
			 */
			crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	/* Only touch MAC_RX_MODE when the mode word actually changed. */
	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
8138
/* netdev set_rx_mode hook: take the full lock and reprogram RX
 * filtering.  A no-op while the interface is down; the filters are
 * programmed at open time in that case.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
8150
/* Size in bytes of the register dump produced by tg3_get_regs(). */
#define TG3_REGDUMP_LEN         (32 * 1024)

/* ethtool get_regs_len hook: the dump buffer size is fixed. */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REGDUMP_LEN;
}
8157
/* ethtool get_regs hook: dump device registers into a zeroed
 * TG3_REGDUMP_LEN buffer, placing each register at its own hardware
 * offset within the dump.  Left all-zero when the PHY is in a
 * low-power state.
 */
static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p;
	struct tg3 *tp = netdev_priv(dev);
	u8 *orig_p = _p;
	int i;

	regs->version = 0;

	memset(p, 0, TG3_REGDUMP_LEN);

	if (tp->link_config.phy_is_low_power)
		return;

	tg3_full_lock(tp, 0);

/* Read one register into the dump and advance the cursor. */
#define __GET_REG32(reg)	(*(p)++ = tr32(reg))
/* Dump `len' bytes of registers starting at offset `base'. */
#define GET_REG32_LOOP(base,len)		\
do {	p = (u32 *)(orig_p + (base));		\
	for (i = 0; i < len; i += 4)		\
		__GET_REG32((base) + i);	\
} while (0)
/* Dump a single register at its own offset. */
#define GET_REG32_1(reg)			\
do {	p = (u32 *)(orig_p + (reg));		\
	__GET_REG32((reg));			\
} while (0)

	GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
	GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
	GET_REG32_LOOP(MAC_MODE, 0x4f0);
	GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
	GET_REG32_1(SNDDATAC_MODE);
	GET_REG32_LOOP(SNDBDS_MODE, 0x80);
	GET_REG32_LOOP(SNDBDI_MODE, 0x48);
	GET_REG32_1(SNDBDC_MODE);
	GET_REG32_LOOP(RCVLPC_MODE, 0x20);
	GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
	GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
	GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
	GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
	GET_REG32_1(RCVDCC_MODE);
	GET_REG32_LOOP(RCVBDI_MODE, 0x20);
	GET_REG32_LOOP(RCVCC_MODE, 0x14);
	GET_REG32_LOOP(RCVLSC_MODE, 0x08);
	GET_REG32_1(MBFREE_MODE);
	GET_REG32_LOOP(HOSTCC_MODE, 0x100);
	GET_REG32_LOOP(MEMARB_MODE, 0x10);
	GET_REG32_LOOP(BUFMGR_MODE, 0x58);
	GET_REG32_LOOP(RDMAC_MODE, 0x08);
	GET_REG32_LOOP(WDMAC_MODE, 0x08);
	GET_REG32_1(RX_CPU_MODE);
	GET_REG32_1(RX_CPU_STATE);
	GET_REG32_1(RX_CPU_PGMCTR);
	GET_REG32_1(RX_CPU_HWBKPT);
	GET_REG32_1(TX_CPU_MODE);
	GET_REG32_1(TX_CPU_STATE);
	GET_REG32_1(TX_CPU_PGMCTR);
	GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
	GET_REG32_LOOP(FTQ_RESET, 0x120);
	GET_REG32_LOOP(MSGINT_MODE, 0x0c);
	GET_REG32_1(DMAC_MODE);
	GET_REG32_LOOP(GRC_MODE, 0x4c);
	/* NVRAM registers are only dumped when NVRAM is present. */
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		GET_REG32_LOOP(NVRAM_CMD, 0x24);

#undef __GET_REG32
#undef GET_REG32_LOOP
#undef GET_REG32_1

	tg3_full_unlock(tp);
}
8230
8231 static int tg3_get_eeprom_len(struct net_device *dev)
8232 {
8233         struct tg3 *tp = netdev_priv(dev);
8234
8235         return tp->nvram_size;
8236 }
8237
8238 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
8239 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val);
8240 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
8241
/* ethtool get_eeprom hook: copy eeprom->len bytes of NVRAM starting at
 * eeprom->offset into data.
 *
 * NVRAM is read in 4-byte little-endian words, so the copy runs in
 * three phases: a leading partial word to reach alignment, whole
 * aligned words, and a trailing partial word.  eeprom->len is updated
 * to the number of bytes actually copied, even when a read fails
 * part-way.  Returns 0 or a negative errno.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, b_offset, b_count;
	__le32 val;

	/* NVRAM is not accessible while in low-power mode. */
	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read_le(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		memcpy(data, ((char*)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes upto the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_le(tp, offset + i, &val);
		if (ret) {
			/* report how much was copied before the failure */
			eeprom->len += i;
			return ret;
		}
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_le(tp, b_offset, &val);
		if (ret)
			return ret;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	return 0;
}
8301
8302 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
8303
/* ethtool set_eeprom hook: write eeprom->len bytes at eeprom->offset
 * into NVRAM.
 *
 * NVRAM writes are whole 4-byte words.  When the request is not
 * word-aligned at either end, the bordering words are read first and
 * a temporary buffer covering the widened, aligned range is assembled
 * (read-modify-write) before the block write.  Returns 0 or a
 * negative errno.
 */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__le32 start, end;

	/* NVRAM is not accessible while in low-power mode. */
	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	if (eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read_le(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read_le(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		/* Build the aligned image: preserved leading word,
		 * caller's payload, preserved trailing word.
		 */
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
8361
/* ethtool get_settings hook: report supported link modes (keyed off
 * the 10/100-only and SERDES capability flags), the configured
 * advertisement mask, and - while the interface is up - the currently
 * negotiated speed/duplex.  Always returns 0.
 */
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	cmd->supported = (SUPPORTED_Autoneg);

	if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
		/* Copper port: full 10/100 mode set. */
		cmd->supported |= (SUPPORTED_100baseT_Half |
				  SUPPORTED_100baseT_Full |
				  SUPPORTED_10baseT_Half |
				  SUPPORTED_10baseT_Full |
				  SUPPORTED_TP);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	if (netif_running(dev)) {
		/* Active speed/duplex are only meaningful when up. */
		cmd->speed = tp->link_config.active_speed;
		cmd->duplex = tp->link_config.active_duplex;
	}
	cmd->phy_address = PHY_ADDR;
	cmd->transceiver = 0;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
8396
8397 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8398 {
8399         struct tg3 *tp = netdev_priv(dev);
8400
8401         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
8402                 /* These are the only valid advertisement bits allowed.  */
8403                 if (cmd->autoneg == AUTONEG_ENABLE &&
8404                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
8405                                           ADVERTISED_1000baseT_Full |
8406                                           ADVERTISED_Autoneg |
8407                                           ADVERTISED_FIBRE)))
8408                         return -EINVAL;
8409                 /* Fiber can only do SPEED_1000.  */
8410                 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8411                          (cmd->speed != SPEED_1000))
8412                         return -EINVAL;
8413         /* Copper cannot force SPEED_1000.  */
8414         } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8415                    (cmd->speed == SPEED_1000))
8416                 return -EINVAL;
8417         else if ((cmd->speed == SPEED_1000) &&
8418                  (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY))
8419                 return -EINVAL;
8420
8421         tg3_full_lock(tp, 0);
8422
8423         tp->link_config.autoneg = cmd->autoneg;
8424         if (cmd->autoneg == AUTONEG_ENABLE) {
8425                 tp->link_config.advertising = (cmd->advertising |
8426                                               ADVERTISED_Autoneg);
8427                 tp->link_config.speed = SPEED_INVALID;
8428                 tp->link_config.duplex = DUPLEX_INVALID;
8429         } else {
8430                 tp->link_config.advertising = 0;
8431                 tp->link_config.speed = cmd->speed;
8432                 tp->link_config.duplex = cmd->duplex;
8433         }
8434
8435         tp->link_config.orig_speed = tp->link_config.speed;
8436         tp->link_config.orig_duplex = tp->link_config.duplex;
8437         tp->link_config.orig_autoneg = tp->link_config.autoneg;
8438
8439         if (netif_running(dev))
8440                 tg3_setup_phy(tp, 1);
8441
8442         tg3_full_unlock(tp);
8443
8444         return 0;
8445 }
8446
8447 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
8448 {
8449         struct tg3 *tp = netdev_priv(dev);
8450
8451         strcpy(info->driver, DRV_MODULE_NAME);
8452         strcpy(info->version, DRV_MODULE_VERSION);
8453         strcpy(info->fw_version, tp->fw_ver);
8454         strcpy(info->bus_info, pci_name(tp->pdev));
8455 }
8456
8457 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8458 {
8459         struct tg3 *tp = netdev_priv(dev);
8460
8461         if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
8462                 wol->supported = WAKE_MAGIC;
8463         else
8464                 wol->supported = 0;
8465         wol->wolopts = 0;
8466         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
8467                 wol->wolopts = WAKE_MAGIC;
8468         memset(&wol->sopass, 0, sizeof(wol->sopass));
8469 }
8470
8471 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8472 {
8473         struct tg3 *tp = netdev_priv(dev);
8474
8475         if (wol->wolopts & ~WAKE_MAGIC)
8476                 return -EINVAL;
8477         if ((wol->wolopts & WAKE_MAGIC) &&
8478             !(tp->tg3_flags & TG3_FLAG_WOL_CAP))
8479                 return -EINVAL;
8480
8481         spin_lock_bh(&tp->lock);
8482         if (wol->wolopts & WAKE_MAGIC)
8483                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
8484         else
8485                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
8486         spin_unlock_bh(&tp->lock);
8487
8488         return 0;
8489 }
8490
8491 static u32 tg3_get_msglevel(struct net_device *dev)
8492 {
8493         struct tg3 *tp = netdev_priv(dev);
8494         return tp->msg_enable;
8495 }
8496
8497 static void tg3_set_msglevel(struct net_device *dev, u32 value)
8498 {
8499         struct tg3 *tp = netdev_priv(dev);
8500         tp->msg_enable = value;
8501 }
8502
/* ethtool set_tso hook.
 *
 * Hardware without TSO capability only accepts value == 0.  On HW_TSO_2
 * parts other than the 5906, IPv6 TSO (NETIF_F_TSO6) is toggled along
 * with plain TSO, and the 5761 additionally gains NETIF_F_TSO_ECN.
 * The generic ethtool helper performs the final NETIF_F_TSO update.
 */
static int tg3_set_tso(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
		if (value)
			return -EINVAL;
		return 0;
	}
	if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
		if (value) {
			dev->features |= NETIF_F_TSO6;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
				dev->features |= NETIF_F_TSO_ECN;
		} else
			dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
	}
	return ethtool_op_set_tso(dev, value);
}
8523
/* ethtool nway_reset hook: restart autonegotiation.
 *
 * Returns -EAGAIN when the interface is down, -EINVAL on SERDES PHYs
 * or when autoneg is not currently enabled (unless the link is in
 * parallel-detect mode), 0 otherwise.
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 bmcr;
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		return -EINVAL;

	spin_lock_bh(&tp->lock);
	r = -EINVAL;
	/* NOTE(review): BMCR is read twice back to back and the first
	 * result is discarded - presumably to flush latched PHY state.
	 * Confirm before removing the apparent duplicate.
	 */
	tg3_readphy(tp, MII_BMCR, &bmcr);
	if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
	    ((bmcr & BMCR_ANENABLE) ||
	     (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
		/* Restart (and force-enable) autonegotiation. */
		tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
					   BMCR_ANENABLE);
		r = 0;
	}
	spin_unlock_bh(&tp->lock);

	return r;
}
8550
8551 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8552 {
8553         struct tg3 *tp = netdev_priv(dev);
8554
8555         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
8556         ering->rx_mini_max_pending = 0;
8557         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8558                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
8559         else
8560                 ering->rx_jumbo_max_pending = 0;
8561
8562         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
8563
8564         ering->rx_pending = tp->rx_pending;
8565         ering->rx_mini_pending = 0;
8566         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8567                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
8568         else
8569                 ering->rx_jumbo_pending = 0;
8570
8571         ering->tx_pending = tp->tx_pending;
8572 }
8573
/* ethtool set_ringparam hook: resize the standard RX, jumbo RX and TX
 * rings.
 *
 * Rejects sizes beyond the hardware ring limits, TX rings too small to
 * hold a maximally-fragmented skb, and (on TSO_BUG chips) TX rings
 * smaller than three frag sets.  A running device is halted,
 * re-initialized with the new sizes and restarted; returns the
 * tg3_restart_hw() error in that case.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int irq_sync = 0, err = 0;

	if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
	    (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		/* Quiesce the device before taking the full lock. */
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* Some chips cap the standard RX ring at 64 entries. */
	if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
	tp->tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	return err;
}
8613
8614 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8615 {
8616         struct tg3 *tp = netdev_priv(dev);
8617
8618         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
8619
8620         if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX)
8621                 epause->rx_pause = 1;
8622         else
8623                 epause->rx_pause = 0;
8624
8625         if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX)
8626                 epause->tx_pause = 1;
8627         else
8628                 epause->tx_pause = 0;
8629 }
8630
/* ethtool set_pauseparam hook: update the pause-autoneg flag and the
 * requested RX/TX flow-control directions, then (if the device is up)
 * halt and re-initialize the hardware so the new settings take effect.
 * Returns 0 or the error from tg3_restart_hw().
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int irq_sync = 0, err = 0;

	if (netif_running(dev)) {
		/* Quiesce the device before taking the full lock. */
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	if (epause->autoneg)
		tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
	else
		tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
	if (epause->rx_pause)
		tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
	else
		tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
	if (epause->tx_pause)
		tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
	else
		tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	return err;
}
8667
8668 static u32 tg3_get_rx_csum(struct net_device *dev)
8669 {
8670         struct tg3 *tp = netdev_priv(dev);
8671         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
8672 }
8673
8674 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
8675 {
8676         struct tg3 *tp = netdev_priv(dev);
8677
8678         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8679                 if (data != 0)
8680                         return -EINVAL;
8681                 return 0;
8682         }
8683
8684         spin_lock_bh(&tp->lock);
8685         if (data)
8686                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8687         else
8688                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8689         spin_unlock_bh(&tp->lock);
8690
8691         return 0;
8692 }
8693
8694 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
8695 {
8696         struct tg3 *tp = netdev_priv(dev);
8697
8698         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8699                 if (data != 0)
8700                         return -EINVAL;
8701                 return 0;
8702         }
8703
8704         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8705             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
8706             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8707             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8708                 ethtool_op_set_tx_ipv6_csum(dev, data);
8709         else
8710                 ethtool_op_set_tx_csum(dev, data);
8711
8712         return 0;
8713 }
8714
8715 static int tg3_get_sset_count (struct net_device *dev, int sset)
8716 {
8717         switch (sset) {
8718         case ETH_SS_TEST:
8719                 return TG3_NUM_TEST;
8720         case ETH_SS_STATS:
8721                 return TG3_NUM_STATS;
8722         default:
8723                 return -EOPNOTSUPP;
8724         }
8725 }
8726
8727 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
8728 {
8729         switch (stringset) {
8730         case ETH_SS_STATS:
8731                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
8732                 break;
8733         case ETH_SS_TEST:
8734                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
8735                 break;
8736         default:
8737                 WARN_ON(1);     /* we need a WARN() */
8738                 break;
8739         }
8740 }
8741
/* ethtool "identify adapter" hook: blink the port LEDs so a user can
 * physically locate the NIC.  @data is the requested duration in seconds
 * (0 selects a 2 second default).  Returns -EAGAIN if the interface is
 * not up, 0 otherwise.
 */
static int tg3_phys_id(struct net_device *dev, u32 data)
{
        struct tg3 *tp = netdev_priv(dev);
        int i;

        if (!netif_running(tp->dev))
                return -EAGAIN;

        if (data == 0)
                data = 2;

        /* Alternate between forcing all LEDs on and forcing them off
         * every 500 ms, giving one full on/off cycle per second.
         */
        for (i = 0; i < (data * 2); i++) {
                if ((i % 2) == 0)
                        tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
                                           LED_CTRL_1000MBPS_ON |
                                           LED_CTRL_100MBPS_ON |
                                           LED_CTRL_10MBPS_ON |
                                           LED_CTRL_TRAFFIC_OVERRIDE |
                                           LED_CTRL_TRAFFIC_BLINK |
                                           LED_CTRL_TRAFFIC_LED);

                else
                        tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
                                           LED_CTRL_TRAFFIC_OVERRIDE);

                /* A pending signal aborts the blinking early. */
                if (msleep_interruptible(500))
                        break;
        }
        /* Restore normal, hardware-controlled LED behavior. */
        tw32(MAC_LED_CTRL, tp->led_ctrl);
        return 0;
}
8773
/* ethtool hook: copy the current statistics snapshot into @tmp_stats.
 * tg3_get_estats() returns a block the size of tp->estats (see its
 * definition for how the counters are gathered).
 */
static void tg3_get_ethtool_stats (struct net_device *dev,
                                   struct ethtool_stats *estats, u64 *tmp_stats)
{
        struct tg3 *tp = netdev_priv(dev);
        memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
}
8780
8781 #define NVRAM_TEST_SIZE 0x100
8782 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
8783 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
8784 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
8785 #define NVRAM_SELFBOOT_HW_SIZE 0x20
8786 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
8787
/* Offline self-test: validate the NVRAM contents.
 *
 * The NVRAM layout is identified by its magic word; legacy images are
 * CRC-checked, selfboot format-1 images use a byte checksum, and selfboot
 * HW images use per-byte odd parity.  Returns 0 if the image verifies,
 * -EIO on corruption or read failure, -ENOMEM if the scratch buffer
 * cannot be allocated.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
        u32 csum, magic;
        __le32 *buf;
        int i, j, k, err = 0, size;

        if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
                return -EIO;

        /* Determine how much of the NVRAM to read based on the image type
         * encoded in the magic word.
         */
        if (magic == TG3_EEPROM_MAGIC)
                size = NVRAM_TEST_SIZE;
        else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
                if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
                    TG3_EEPROM_SB_FORMAT_1) {
                        switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
                        case TG3_EEPROM_SB_REVISION_0:
                                size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
                                break;
                        case TG3_EEPROM_SB_REVISION_2:
                                size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
                                break;
                        case TG3_EEPROM_SB_REVISION_3:
                                size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
                                break;
                        default:
                                /* Unknown revision: skip the test. */
                                return 0;
                        }
                } else
                        return 0;
        } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
                size = NVRAM_SELFBOOT_HW_SIZE;
        else
                return -EIO;

        buf = kmalloc(size, GFP_KERNEL);
        if (buf == NULL)
                return -ENOMEM;

        /* Read the image into the scratch buffer, one word at a time. */
        err = -EIO;
        for (i = 0, j = 0; i < size; i += 4, j++) {
                if ((err = tg3_nvram_read_le(tp, i, &buf[j])) != 0)
                        break;
        }
        if (i < size)
                goto out;

        /* Selfboot format */
        magic = swab32(le32_to_cpu(buf[0]));
        if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
            TG3_EEPROM_MAGIC_FW) {
                /* Format-1 images: the sum of all bytes must be zero. */
                u8 *buf8 = (u8 *) buf, csum8 = 0;

                if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
                    TG3_EEPROM_SB_REVISION_2) {
                        /* For rev 2, the csum doesn't include the MBA. */
                        for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
                                csum8 += buf8[i];
                        for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
                                csum8 += buf8[i];
                } else {
                        for (i = 0; i < size; i++)
                                csum8 += buf8[i];
                }

                if (csum8 == 0) {
                        err = 0;
                        goto out;
                }

                err = -EIO;
                goto out;
        }

        if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
            TG3_EEPROM_MAGIC_HW) {
                /* Selfboot HW images interleave parity bits with the data;
                 * bytes 0, 8 and 16 hold parity for the bytes that follow.
                 */
                u8 data[NVRAM_SELFBOOT_DATA_SIZE];
                u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
                u8 *buf8 = (u8 *) buf;

                /* Separate the parity bits and the data bytes.  */
                for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
                        if ((i == 0) || (i == 8)) {
                                int l;
                                u8 msk;

                                /* 7 parity bits for the next 7 data bytes. */
                                for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
                                        parity[k++] = buf8[i] & msk;
                                i++;
                        }
                        else if (i == 16) {
                                int l;
                                u8 msk;

                                /* 6 parity bits here ... */
                                for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
                                        parity[k++] = buf8[i] & msk;
                                i++;

                                /* ... plus 8 more in the following byte. */
                                for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
                                        parity[k++] = buf8[i] & msk;
                                i++;
                        }
                        data[j++] = buf8[i];
                }

                /* Verify odd parity: a data byte with an odd bit count must
                 * have its parity bit clear, an even count must have it set.
                 */
                err = -EIO;
                for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
                        u8 hw8 = hweight8(data[i]);

                        if ((hw8 & 0x1) && parity[i])
                                goto out;
                        else if (!(hw8 & 0x1) && !parity[i])
                                goto out;
                }
                err = 0;
                goto out;
        }

        /* Legacy images: two CRC-protected regions. */
        /* Bootstrap checksum at offset 0x10 */
        csum = calc_crc((unsigned char *) buf, 0x10);
        if(csum != le32_to_cpu(buf[0x10/4]))
                goto out;

        /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
        csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
        if (csum != le32_to_cpu(buf[0xfc/4]))
                 goto out;

        err = 0;

out:
        kfree(buf);
        return err;
}
8921
8922 #define TG3_SERDES_TIMEOUT_SEC  2
8923 #define TG3_COPPER_TIMEOUT_SEC  6
8924
8925 static int tg3_test_link(struct tg3 *tp)
8926 {
8927         int i, max;
8928
8929         if (!netif_running(tp->dev))
8930                 return -ENODEV;
8931
8932         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
8933                 max = TG3_SERDES_TIMEOUT_SEC;
8934         else
8935                 max = TG3_COPPER_TIMEOUT_SEC;
8936
8937         for (i = 0; i < max; i++) {
8938                 if (netif_carrier_ok(tp->dev))
8939                         return 0;
8940
8941                 if (msleep_interruptible(1000))
8942                         break;
8943         }
8944
8945         return -EIO;
8946 }
8947
/* Only test the commonly used registers */
/* Offline self-test: verify register read-only and read/write bits.
 *
 * Each table entry gives a register offset, flags selecting which chip
 * families the entry applies to, a mask of read-only bits and a mask of
 * read/write bits.  Each applicable register is written with all-zeros
 * and then all-ones; the read-only bits must be unaffected and the
 * read/write bits must follow the written value.  The original register
 * contents are restored afterwards.  Returns 0 on success, -EIO on the
 * first mismatch.
 */
static int tg3_test_registers(struct tg3 *tp)
{
        int i, is_5705, is_5750;
        u32 offset, read_mask, write_mask, val, save_val, read_val;
        static struct {
                u16 offset;
                u16 flags;
#define TG3_FL_5705     0x1
#define TG3_FL_NOT_5705 0x2
#define TG3_FL_NOT_5788 0x4
#define TG3_FL_NOT_5750 0x8
                u32 read_mask;
                u32 write_mask;
        } reg_tbl[] = {
                /* MAC Control Registers */
                { MAC_MODE, TG3_FL_NOT_5705,
                        0x00000000, 0x00ef6f8c },
                { MAC_MODE, TG3_FL_5705,
                        0x00000000, 0x01ef6b8c },
                { MAC_STATUS, TG3_FL_NOT_5705,
                        0x03800107, 0x00000000 },
                { MAC_STATUS, TG3_FL_5705,
                        0x03800100, 0x00000000 },
                { MAC_ADDR_0_HIGH, 0x0000,
                        0x00000000, 0x0000ffff },
                { MAC_ADDR_0_LOW, 0x0000,
                        0x00000000, 0xffffffff },
                { MAC_RX_MTU_SIZE, 0x0000,
                        0x00000000, 0x0000ffff },
                { MAC_TX_MODE, 0x0000,
                        0x00000000, 0x00000070 },
                { MAC_TX_LENGTHS, 0x0000,
                        0x00000000, 0x00003fff },
                { MAC_RX_MODE, TG3_FL_NOT_5705,
                        0x00000000, 0x000007fc },
                { MAC_RX_MODE, TG3_FL_5705,
                        0x00000000, 0x000007dc },
                { MAC_HASH_REG_0, 0x0000,
                        0x00000000, 0xffffffff },
                { MAC_HASH_REG_1, 0x0000,
                        0x00000000, 0xffffffff },
                { MAC_HASH_REG_2, 0x0000,
                        0x00000000, 0xffffffff },
                { MAC_HASH_REG_3, 0x0000,
                        0x00000000, 0xffffffff },

                /* Receive Data and Receive BD Initiator Control Registers. */
                { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
                        0x00000000, 0x00000003 },
                { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { RCVDBDI_STD_BD+0, 0x0000,
                        0x00000000, 0xffffffff },
                { RCVDBDI_STD_BD+4, 0x0000,
                        0x00000000, 0xffffffff },
                { RCVDBDI_STD_BD+8, 0x0000,
                        0x00000000, 0xffff0002 },
                { RCVDBDI_STD_BD+0xc, 0x0000,
                        0x00000000, 0xffffffff },

                /* Receive BD Initiator Control Registers. */
                { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { RCVBDI_STD_THRESH, TG3_FL_5705,
                        0x00000000, 0x000003ff },
                { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },

                /* Host Coalescing Control Registers. */
                { HOSTCC_MODE, TG3_FL_NOT_5705,
                        0x00000000, 0x00000004 },
                { HOSTCC_MODE, TG3_FL_5705,
                        0x00000000, 0x000000f6 },
                { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
                        0x00000000, 0x000003ff },
                { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
                        0x00000000, 0x000003ff },
                { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
                        0x00000000, 0x000000ff },
                { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
                        0x00000000, 0x000000ff },
                { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
                        0x00000000, 0x000000ff },
                { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
                        0x00000000, 0x000000ff },
                { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
                        0xffffffff, 0x00000000 },
                { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
                        0xffffffff, 0x00000000 },

                /* Buffer Manager Control Registers. */
                { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
                        0x00000000, 0x007fff80 },
                { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
                        0x00000000, 0x007fffff },
                { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
                        0x00000000, 0x0000003f },
                { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
                        0x00000000, 0x000001ff },
                { BUFMGR_MB_HIGH_WATER, 0x0000,
                        0x00000000, 0x000001ff },
                { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
                        0xffffffff, 0x00000000 },
                { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
                        0xffffffff, 0x00000000 },

                /* Mailbox Registers */
                { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
                        0x00000000, 0x000001ff },
                { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
                        0x00000000, 0x000001ff },
                { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
                        0x00000000, 0x000007ff },
                { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
                        0x00000000, 0x000001ff },

                /* Sentinel: offset 0xffff terminates the table. */
                { 0xffff, 0x0000, 0x00000000, 0x00000000 },
        };

        is_5705 = is_5750 = 0;
        if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
                is_5705 = 1;
                if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
                        is_5750 = 1;
        }

        for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
                /* Skip entries not applicable to this chip family. */
                if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
                        continue;

                if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
                        continue;

                if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
                    (reg_tbl[i].flags & TG3_FL_NOT_5788))
                        continue;

                if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
                        continue;

                offset = (u32) reg_tbl[i].offset;
                read_mask = reg_tbl[i].read_mask;
                write_mask = reg_tbl[i].write_mask;

                /* Save the original register content */
                save_val = tr32(offset);

                /* Determine the read-only value. */
                read_val = save_val & read_mask;

                /* Write zero to the register, then make sure the read-only bits
                 * are not changed and the read/write bits are all zeros.
                 */
                tw32(offset, 0);

                val = tr32(offset);

                /* Test the read-only and read/write bits. */
                if (((val & read_mask) != read_val) || (val & write_mask))
                        goto out;

                /* Write ones to all the bits defined by RdMask and WrMask, then
                 * make sure the read-only bits are not changed and the
                 * read/write bits are all ones.
                 */
                tw32(offset, read_mask | write_mask);

                val = tr32(offset);

                /* Test the read-only bits. */
                if ((val & read_mask) != read_val)
                        goto out;

                /* Test the read/write bits. */
                if ((val & write_mask) != write_mask)
                        goto out;

                /* Restore the register before moving on. */
                tw32(offset, save_val);
        }

        return 0;

out:
        if (netif_msg_hw(tp))
                printk(KERN_ERR PFX "Register test failed at offset %x\n",
                       offset);
        tw32(offset, save_val);
        return -EIO;
}
9168
9169 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
9170 {
9171         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
9172         int i;
9173         u32 j;
9174
9175         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
9176                 for (j = 0; j < len; j += 4) {
9177                         u32 val;
9178
9179                         tg3_write_mem(tp, offset + j, test_pattern[i]);
9180                         tg3_read_mem(tp, offset + j, &val);
9181                         if (val != test_pattern[i])
9182                                 return -EIO;
9183                 }
9184         }
9185         return 0;
9186 }
9187
9188 static int tg3_test_memory(struct tg3 *tp)
9189 {
9190         static struct mem_entry {
9191                 u32 offset;
9192                 u32 len;
9193         } mem_tbl_570x[] = {
9194                 { 0x00000000, 0x00b50},
9195                 { 0x00002000, 0x1c000},
9196                 { 0xffffffff, 0x00000}
9197         }, mem_tbl_5705[] = {
9198                 { 0x00000100, 0x0000c},
9199                 { 0x00000200, 0x00008},
9200                 { 0x00004000, 0x00800},
9201                 { 0x00006000, 0x01000},
9202                 { 0x00008000, 0x02000},
9203                 { 0x00010000, 0x0e000},
9204                 { 0xffffffff, 0x00000}
9205         }, mem_tbl_5755[] = {
9206                 { 0x00000200, 0x00008},
9207                 { 0x00004000, 0x00800},
9208                 { 0x00006000, 0x00800},
9209                 { 0x00008000, 0x02000},
9210                 { 0x00010000, 0x0c000},
9211                 { 0xffffffff, 0x00000}
9212         }, mem_tbl_5906[] = {
9213                 { 0x00000200, 0x00008},
9214                 { 0x00004000, 0x00400},
9215                 { 0x00006000, 0x00400},
9216                 { 0x00008000, 0x01000},
9217                 { 0x00010000, 0x01000},
9218                 { 0xffffffff, 0x00000}
9219         };
9220         struct mem_entry *mem_tbl;
9221         int err = 0;
9222         int i;
9223
9224         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9225                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
9226                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9227                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9228                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9229                         mem_tbl = mem_tbl_5755;
9230                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9231                         mem_tbl = mem_tbl_5906;
9232                 else
9233                         mem_tbl = mem_tbl_5705;
9234         } else
9235                 mem_tbl = mem_tbl_570x;
9236
9237         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
9238                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
9239                     mem_tbl[i].len)) != 0)
9240                         break;
9241         }
9242
9243         return err;
9244 }
9245
9246 #define TG3_MAC_LOOPBACK        0
9247 #define TG3_PHY_LOOPBACK        1
9248
/* Run one loopback test in either MAC-internal or PHY loopback mode.
 *
 * A single 1514-byte frame is built, transmitted, and received back
 * through the loopback path; the received payload is compared byte for
 * byte against what was sent.  Returns 0 on success, -EIO on any
 * mismatch or timeout, -ENOMEM if the test skb cannot be allocated,
 * -EINVAL for an unknown @loopback_mode.
 */
static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
{
        u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
        u32 desc_idx;
        struct sk_buff *skb, *rx_skb;
        u8 *tx_data;
        dma_addr_t map;
        int num_pkts, tx_len, rx_len, i, err;
        struct tg3_rx_buffer_desc *desc;

        if (loopback_mode == TG3_MAC_LOOPBACK) {
                /* HW errata - mac loopback fails in some cases on 5780.
                 * Normal traffic and PHY loopback are not affected by
                 * errata.
                 */
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
                        return 0;

                /* Point the MAC back at itself. */
                mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
                           MAC_MODE_PORT_INT_LPBACK;
                if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
                        mac_mode |= MAC_MODE_LINK_POLARITY;
                if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
                        mac_mode |= MAC_MODE_PORT_MODE_MII;
                else
                        mac_mode |= MAC_MODE_PORT_MODE_GMII;
                tw32(MAC_MODE, mac_mode);
        } else if (loopback_mode == TG3_PHY_LOOPBACK) {
                u32 val;

                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                        u32 phytest;

                        /* Via the shadow register set, clear a test bit in
                         * PHY register 0x1b (5906-specific setup).
                         */
                        if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
                                u32 phy;

                                tg3_writephy(tp, MII_TG3_EPHY_TEST,
                                             phytest | MII_TG3_EPHY_SHADOW_EN);
                                if (!tg3_readphy(tp, 0x1b, &phy))
                                        tg3_writephy(tp, 0x1b, phy & ~0x20);
                                tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
                        }
                        val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
                } else
                        val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;

                tg3_phy_toggle_automdix(tp, 0);

                /* Put the PHY itself into loopback mode. */
                tg3_writephy(tp, MII_BMCR, val);
                udelay(40);

                mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                        tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
                        mac_mode |= MAC_MODE_PORT_MODE_MII;
                } else
                        mac_mode |= MAC_MODE_PORT_MODE_GMII;

                /* reset to prevent losing 1st rx packet intermittently */
                if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
                        tw32_f(MAC_RX_MODE, RX_MODE_RESET);
                        udelay(10);
                        tw32_f(MAC_RX_MODE, tp->rx_mode);
                }
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
                        /* 5700 link-polarity quirks depend on the PHY model. */
                        if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
                                mac_mode &= ~MAC_MODE_LINK_POLARITY;
                        else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
                                mac_mode |= MAC_MODE_LINK_POLARITY;
                        tg3_writephy(tp, MII_TG3_EXT_CTRL,
                                     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
                }
                tw32(MAC_MODE, mac_mode);
        }
        else
                return -EINVAL;

        err = -EIO;

        /* Build the test frame: destination MAC, zero filler, then a
         * predictable byte pattern in the payload.
         */
        tx_len = 1514;
        skb = netdev_alloc_skb(tp->dev, tx_len);
        if (!skb)
                return -ENOMEM;

        tx_data = skb_put(skb, tx_len);
        memcpy(tx_data, tp->dev->dev_addr, 6);
        memset(tx_data + 6, 0x0, 8);

        tw32(MAC_RX_MTU_SIZE, tx_len + 4);

        for (i = 14; i < tx_len; i++)
                tx_data[i] = (u8) (i & 0xff);

        map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);

        tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
             HOSTCC_MODE_NOW);

        udelay(10);

        /* Remember where the RX producer index started so we can tell
         * when our looped-back frame arrives.
         */
        rx_start_idx = tp->hw_status->idx[0].rx_producer;

        num_pkts = 0;

        tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);

        tp->tx_prod++;
        num_pkts++;

        /* Kick the transmitter. */
        tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
                     tp->tx_prod);
        tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);

        udelay(10);

        /* 250 usec to allow enough time on some 10/100 Mbps devices.  */
        for (i = 0; i < 25; i++) {
                tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
                       HOSTCC_MODE_NOW);

                udelay(10);

                tx_idx = tp->hw_status->idx[0].tx_consumer;
                rx_idx = tp->hw_status->idx[0].rx_producer;
                if ((tx_idx == tp->tx_prod) &&
                    (rx_idx == (rx_start_idx + num_pkts)))
                        break;
        }

        pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
        dev_kfree_skb(skb);

        /* Frame never fully transmitted or never received back. */
        if (tx_idx != tp->tx_prod)
                goto out;

        if (rx_idx != rx_start_idx + num_pkts)
                goto out;

        /* Validate the RX completion descriptor before touching data. */
        desc = &tp->rx_rcb[rx_start_idx];
        desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
        opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
        if (opaque_key != RXD_OPAQUE_RING_STD)
                goto out;

        if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
            (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
                goto out;

        rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
        if (rx_len != tx_len)
                goto out;

        rx_skb = tp->rx_std_buffers[desc_idx].skb;

        map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
        pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);

        /* Compare the received payload against the pattern we sent. */
        for (i = 14; i < tx_len; i++) {
                if (*(rx_skb->data + i) != (u8) (i & 0xff))
                        goto out;
        }
        err = 0;

        /* tg3_free_rings will unmap and free the rx_skb */
out:
        return err;
}
9416
9417 #define TG3_MAC_LOOPBACK_FAILED         1
9418 #define TG3_PHY_LOOPBACK_FAILED         2
9419 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
9420                                          TG3_PHY_LOOPBACK_FAILED)
9421
/* Offline self-test: run the MAC loopback test, and (for non-SerDes
 * devices) the PHY loopback test.  Returns a bitmask of
 * TG3_MAC_LOOPBACK_FAILED / TG3_PHY_LOOPBACK_FAILED, or 0 on success.
 */
static int tg3_test_loopback(struct tg3 *tp)
{
        int err = 0;
        u32 cpmuctrl = 0;

        if (!netif_running(tp->dev))
                return TG3_LOOPBACK_FAILED;

        /* Bring the hardware to a clean initialized state first. */
        err = tg3_reset_hw(tp, 1);
        if (err)
                return TG3_LOOPBACK_FAILED;

        if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
                int i;
                u32 status;

                /* Request the CPMU hardware mutex before changing CPMU_CTRL. */
                tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);

                /* Wait for up to 40 microseconds to acquire lock. */
                for (i = 0; i < 4; i++) {
                        status = tr32(TG3_CPMU_MUTEX_GNT);
                        if (status == CPMU_MUTEX_GNT_DRIVER)
                                break;
                        udelay(10);
                }

                if (status != CPMU_MUTEX_GNT_DRIVER)
                        return TG3_LOOPBACK_FAILED;

                /* Turn off power management based on link speed. */
                cpmuctrl = tr32(TG3_CPMU_CTRL);
                tw32(TG3_CPMU_CTRL,
                     cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
                                  CPMU_CTRL_LINK_AWARE_MODE));
        }

        if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
                err |= TG3_MAC_LOOPBACK_FAILED;

        if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
                /* Restore the saved CPMU power-management settings. */
                tw32(TG3_CPMU_CTRL, cpmuctrl);

                /* Release the mutex */
                tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
        }

        /* PHY loopback is only meaningful on copper (non-SerDes) parts. */
        if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
                if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
                        err |= TG3_PHY_LOOPBACK_FAILED;
        }

        return err;
}
9475
/* ethtool self-test entry point.  Runs the NVRAM and link tests always;
 * when ETH_TEST_FL_OFFLINE is requested it additionally halts the chip
 * and runs the register, memory, loopback and interrupt tests, then
 * restarts the hardware.  Per-test results go in @data[0..5] (nonzero
 * means failed) and ETH_TEST_FL_FAILED is set in @etest->flags on any
 * failure.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
                          u64 *data)
{
        struct tg3 *tp = netdev_priv(dev);

        /* Temporarily power the chip up if it is in a low-power state. */
        if (tp->link_config.phy_is_low_power)
                tg3_set_power_state(tp, PCI_D0);

        memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

        if (tg3_test_nvram(tp) != 0) {
                etest->flags |= ETH_TEST_FL_FAILED;
                data[0] = 1;
        }
        if (tg3_test_link(tp) != 0) {
                etest->flags |= ETH_TEST_FL_FAILED;
                data[1] = 1;
        }
        if (etest->flags & ETH_TEST_FL_OFFLINE) {
                int err, irq_sync = 0;

                /* Quiesce the device before the disruptive tests. */
                if (netif_running(dev)) {
                        tg3_netif_stop(tp);
                        irq_sync = 1;
                }

                tg3_full_lock(tp, irq_sync);

                /* Halt the chip and its internal CPUs; hold the NVRAM lock
                 * across the CPU halt so firmware accesses cannot interfere.
                 */
                tg3_halt(tp, RESET_KIND_SUSPEND, 1);
                err = tg3_nvram_lock(tp);
                tg3_halt_cpu(tp, RX_CPU_BASE);
                if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
                        tg3_halt_cpu(tp, TX_CPU_BASE);
                if (!err)
                        tg3_nvram_unlock(tp);

                if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
                        tg3_phy_reset(tp);

                if (tg3_test_registers(tp) != 0) {
                        etest->flags |= ETH_TEST_FL_FAILED;
                        data[2] = 1;
                }
                if (tg3_test_memory(tp) != 0) {
                        etest->flags |= ETH_TEST_FL_FAILED;
                        data[3] = 1;
                }
                if ((data[4] = tg3_test_loopback(tp)) != 0)
                        etest->flags |= ETH_TEST_FL_FAILED;

                /* The interrupt test needs the lock dropped. */
                tg3_full_unlock(tp);

                if (tg3_test_interrupt(tp) != 0) {
                        etest->flags |= ETH_TEST_FL_FAILED;
                        data[5] = 1;
                }

                tg3_full_lock(tp, 0);

                /* Reset and, if the interface was up, reinitialize and
                 * restart the device.
                 */
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                if (netif_running(dev)) {
                        tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
                        if (!tg3_restart_hw(tp, 1))
                                tg3_netif_start(tp);
                }

                tg3_full_unlock(tp);
        }
        /* Return the chip to its previous low-power state. */
        if (tp->link_config.phy_is_low_power)
                tg3_set_power_state(tp, PCI_D3hot);

}
9548
9549 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9550 {
9551         struct mii_ioctl_data *data = if_mii(ifr);
9552         struct tg3 *tp = netdev_priv(dev);
9553         int err;
9554
9555         switch(cmd) {
9556         case SIOCGMIIPHY:
9557                 data->phy_id = PHY_ADDR;
9558
9559                 /* fallthru */
9560         case SIOCGMIIREG: {
9561                 u32 mii_regval;
9562
9563                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9564                         break;                  /* We have no PHY */
9565
9566                 if (tp->link_config.phy_is_low_power)
9567                         return -EAGAIN;
9568
9569                 spin_lock_bh(&tp->lock);
9570                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
9571                 spin_unlock_bh(&tp->lock);
9572
9573                 data->val_out = mii_regval;
9574
9575                 return err;
9576         }
9577
9578         case SIOCSMIIREG:
9579                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9580                         break;                  /* We have no PHY */
9581
9582                 if (!capable(CAP_NET_ADMIN))
9583                         return -EPERM;
9584
9585                 if (tp->link_config.phy_is_low_power)
9586                         return -EAGAIN;
9587
9588                 spin_lock_bh(&tp->lock);
9589                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
9590                 spin_unlock_bh(&tp->lock);
9591
9592                 return err;
9593
9594         default:
9595                 /* do nothing */
9596                 break;
9597         }
9598         return -EOPNOTSUPP;
9599 }
9600
9601 #if TG3_VLAN_TAG_USED
/* VLAN hook: record the new VLAN group and reprogram the chip's RX
 * mode so VLAN tags are kept or stripped accordingly.  The interface
 * is quiesced around the update when it is running.
 */
static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
        struct tg3 *tp = netdev_priv(dev);

        if (netif_running(dev))
                tg3_netif_stop(tp);

        tg3_full_lock(tp, 0);

        tp->vlgrp = grp;

        /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
        __tg3_set_rx_mode(dev);

        if (netif_running(dev))
                tg3_netif_start(tp);

        tg3_full_unlock(tp);
}
9621 #endif
9622
9623 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9624 {
9625         struct tg3 *tp = netdev_priv(dev);
9626
9627         memcpy(ec, &tp->coal, sizeof(*ec));
9628         return 0;
9629 }
9630
9631 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9632 {
9633         struct tg3 *tp = netdev_priv(dev);
9634         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
9635         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
9636
9637         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
9638                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
9639                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
9640                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
9641                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
9642         }
9643
9644         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
9645             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
9646             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
9647             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
9648             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
9649             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
9650             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
9651             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
9652             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
9653             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
9654                 return -EINVAL;
9655
9656         /* No rx interrupts will be generated if both are zero */
9657         if ((ec->rx_coalesce_usecs == 0) &&
9658             (ec->rx_max_coalesced_frames == 0))
9659                 return -EINVAL;
9660
9661         /* No tx interrupts will be generated if both are zero */
9662         if ((ec->tx_coalesce_usecs == 0) &&
9663             (ec->tx_max_coalesced_frames == 0))
9664                 return -EINVAL;
9665
9666         /* Only copy relevant parameters, ignore all others. */
9667         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
9668         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
9669         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
9670         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
9671         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
9672         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
9673         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
9674         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
9675         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
9676
9677         if (netif_running(dev)) {
9678                 tg3_full_lock(tp, 0);
9679                 __tg3_set_coalesce(tp, &tp->coal);
9680                 tg3_full_unlock(tp);
9681         }
9682         return 0;
9683 }
9684
/* Ethtool operations supported by the tg3 driver. */
static const struct ethtool_ops tg3_ethtool_ops = {
        .get_settings           = tg3_get_settings,
        .set_settings           = tg3_set_settings,
        .get_drvinfo            = tg3_get_drvinfo,
        .get_regs_len           = tg3_get_regs_len,
        .get_regs               = tg3_get_regs,
        .get_wol                = tg3_get_wol,
        .set_wol                = tg3_set_wol,
        .get_msglevel           = tg3_get_msglevel,
        .set_msglevel           = tg3_set_msglevel,
        .nway_reset             = tg3_nway_reset,
        .get_link               = ethtool_op_get_link,
        .get_eeprom_len         = tg3_get_eeprom_len,
        .get_eeprom             = tg3_get_eeprom,
        .set_eeprom             = tg3_set_eeprom,
        .get_ringparam          = tg3_get_ringparam,
        .set_ringparam          = tg3_set_ringparam,
        .get_pauseparam         = tg3_get_pauseparam,
        .set_pauseparam         = tg3_set_pauseparam,
        .get_rx_csum            = tg3_get_rx_csum,
        .set_rx_csum            = tg3_set_rx_csum,
        .set_tx_csum            = tg3_set_tx_csum,
        .set_sg                 = ethtool_op_set_sg,
        .set_tso                = tg3_set_tso,
        .self_test              = tg3_self_test,
        .get_strings            = tg3_get_strings,
        .phys_id                = tg3_phys_id,
        .get_ethtool_stats      = tg3_get_ethtool_stats,
        .get_coalesce           = tg3_get_coalesce,
        .set_coalesce           = tg3_set_coalesce,
        .get_sset_count         = tg3_get_sset_count,
};
9717
9718 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
9719 {
9720         u32 cursize, val, magic;
9721
9722         tp->nvram_size = EEPROM_CHIP_SIZE;
9723
9724         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
9725                 return;
9726
9727         if ((magic != TG3_EEPROM_MAGIC) &&
9728             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
9729             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
9730                 return;
9731
9732         /*
9733          * Size the chip by reading offsets at increasing powers of two.
9734          * When we encounter our validation signature, we know the addressing
9735          * has wrapped around, and thus have our chip size.
9736          */
9737         cursize = 0x10;
9738
9739         while (cursize < tp->nvram_size) {
9740                 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
9741                         return;
9742
9743                 if (val == magic)
9744                         break;
9745
9746                 cursize <<= 1;
9747         }
9748
9749         tp->nvram_size = cursize;
9750 }
9751
9752 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
9753 {
9754         u32 val;
9755
9756         if (tg3_nvram_read_swab(tp, 0, &val) != 0)
9757                 return;
9758
9759         /* Selfboot format */
9760         if (val != TG3_EEPROM_MAGIC) {
9761                 tg3_get_eeprom_size(tp);
9762                 return;
9763         }
9764
9765         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
9766                 if (val != 0) {
9767                         tp->nvram_size = (val >> 16) * 1024;
9768                         return;
9769                 }
9770         }
9771         tp->nvram_size = 0x80000;
9772 }
9773
/* Detect the NVRAM part on pre-5752 chips: decode the vendor from
 * NVRAM_CFG1 (5750/5780-class only) and set the JEDEC id, page size
 * and buffered/flash flags accordingly.  Other chips default to a
 * buffered Atmel AT45DB0X1B part.
 */
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);
        if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
                tp->tg3_flags2 |= TG3_FLG2_FLASH;
        }
        else {
                /* No flash interface: disable compatibility bypass for
                 * plain EEPROM access.
                 */
                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
        }

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
            (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
                switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
                        case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
                                tp->nvram_jedecnum = JEDEC_ATMEL;
                                tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
                                tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                                break;
                        case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
                                tp->nvram_jedecnum = JEDEC_ATMEL;
                                tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
                                break;
                        case FLASH_VENDOR_ATMEL_EEPROM:
                                tp->nvram_jedecnum = JEDEC_ATMEL;
                                tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
                                tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                                break;
                        case FLASH_VENDOR_ST:
                                tp->nvram_jedecnum = JEDEC_ST;
                                tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
                                tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                                break;
                        case FLASH_VENDOR_SAIFUN:
                                tp->nvram_jedecnum = JEDEC_SAIFUN;
                                tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
                                break;
                        case FLASH_VENDOR_SST_SMALL:
                        case FLASH_VENDOR_SST_LARGE:
                                tp->nvram_jedecnum = JEDEC_SST;
                                tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
                                break;
                }
        }
        else {
                /* Chips without vendor decode assume a buffered Atmel part. */
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
                tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
        }
}
9826
/* Detect the NVRAM part behind a 5752: decode vendor and page size
 * from NVRAM_CFG1 and set the JEDEC id, page size and flags.
 */
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);

        /* NVRAM protection for TPM */
        if (nvcfg1 & (1 << 27))
                tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;

        switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
                case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        break;
                case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->tg3_flags2 |= TG3_FLG2_FLASH;
                        break;
                case FLASH_5752VENDOR_ST_M45PE10:
                case FLASH_5752VENDOR_ST_M45PE20:
                case FLASH_5752VENDOR_ST_M45PE40:
                        tp->nvram_jedecnum = JEDEC_ST;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->tg3_flags2 |= TG3_FLG2_FLASH;
                        break;
        }

        if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
                /* Flash parts encode their page size in NVRAM_CFG1. */
                switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
                        case FLASH_5752PAGE_SIZE_256:
                                tp->nvram_pagesize = 256;
                                break;
                        case FLASH_5752PAGE_SIZE_512:
                                tp->nvram_pagesize = 512;
                                break;
                        case FLASH_5752PAGE_SIZE_1K:
                                tp->nvram_pagesize = 1024;
                                break;
                        case FLASH_5752PAGE_SIZE_2K:
                                tp->nvram_pagesize = 2048;
                                break;
                        case FLASH_5752PAGE_SIZE_4K:
                                tp->nvram_pagesize = 4096;
                                break;
                        case FLASH_5752PAGE_SIZE_264:
                                tp->nvram_pagesize = 264;
                                break;
                }
        }
        else {
                /* For eeprom, set pagesize to maximum eeprom size */
                tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
        }
}
9887
/* Detect the NVRAM part behind a 5755 and derive its size.  When the
 * TPM protection bit is set, only the unprotected region is reported
 * as usable.
 */
static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1, protect = 0;

        nvcfg1 = tr32(NVRAM_CFG1);

        /* NVRAM protection for TPM */
        if (nvcfg1 & (1 << 27)) {
                tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
                protect = 1;
        }

        nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
        switch (nvcfg1) {
                case FLASH_5755VENDOR_ATMEL_FLASH_1:
                case FLASH_5755VENDOR_ATMEL_FLASH_2:
                case FLASH_5755VENDOR_ATMEL_FLASH_3:
                case FLASH_5755VENDOR_ATMEL_FLASH_5:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->tg3_flags2 |= TG3_FLG2_FLASH;
                        tp->nvram_pagesize = 264;
                        /* Size depends on the exact part and protection. */
                        if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
                            nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
                                tp->nvram_size = (protect ? 0x3e200 : 0x80000);
                        else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
                                tp->nvram_size = (protect ? 0x1f200 : 0x40000);
                        else
                                tp->nvram_size = (protect ? 0x1f200 : 0x20000);
                        break;
                case FLASH_5752VENDOR_ST_M45PE10:
                case FLASH_5752VENDOR_ST_M45PE20:
                case FLASH_5752VENDOR_ST_M45PE40:
                        tp->nvram_jedecnum = JEDEC_ST;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->tg3_flags2 |= TG3_FLG2_FLASH;
                        tp->nvram_pagesize = 256;
                        if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
                                tp->nvram_size = (protect ? 0x10000 : 0x20000);
                        else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
                                tp->nvram_size = (protect ? 0x10000 : 0x40000);
                        else
                                tp->nvram_size = (protect ? 0x20000 : 0x80000);
                        break;
        }
}
9934
/* Detect the NVRAM part behind a 5787/5784-class chip and set the
 * JEDEC id, page size and buffered/flash flags.
 */
static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);

        switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
                case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
                case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
                case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

                        /* Plain EEPROM: disable compatibility bypass. */
                        nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                        tw32(NVRAM_CFG1, nvcfg1);
                        break;
                case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
                case FLASH_5755VENDOR_ATMEL_FLASH_1:
                case FLASH_5755VENDOR_ATMEL_FLASH_2:
                case FLASH_5755VENDOR_ATMEL_FLASH_3:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->tg3_flags2 |= TG3_FLG2_FLASH;
                        tp->nvram_pagesize = 264;
                        break;
                case FLASH_5752VENDOR_ST_M45PE10:
                case FLASH_5752VENDOR_ST_M45PE20:
                case FLASH_5752VENDOR_ST_M45PE40:
                        tp->nvram_jedecnum = JEDEC_ST;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->tg3_flags2 |= TG3_FLG2_FLASH;
                        tp->nvram_pagesize = 256;
                        break;
        }
}
9972
/* Detect the NVRAM part behind a 5761 and derive its size.  With TPM
 * protection active the usable size is taken from the hardware's
 * address lockout register; otherwise it is decoded from the vendor id.
 */
static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1, protect = 0;

        nvcfg1 = tr32(NVRAM_CFG1);

        /* NVRAM protection for TPM */
        if (nvcfg1 & (1 << 27)) {
                tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
                protect = 1;
        }

        nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
        switch (nvcfg1) {
                case FLASH_5761VENDOR_ATMEL_ADB021D:
                case FLASH_5761VENDOR_ATMEL_ADB041D:
                case FLASH_5761VENDOR_ATMEL_ADB081D:
                case FLASH_5761VENDOR_ATMEL_ADB161D:
                case FLASH_5761VENDOR_ATMEL_MDB021D:
                case FLASH_5761VENDOR_ATMEL_MDB041D:
                case FLASH_5761VENDOR_ATMEL_MDB081D:
                case FLASH_5761VENDOR_ATMEL_MDB161D:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->tg3_flags2 |= TG3_FLG2_FLASH;
                        /* These Atmel parts use linear addressing - skip
                         * the page-based address translation.
                         */
                        tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
                        tp->nvram_pagesize = 256;
                        break;
                case FLASH_5761VENDOR_ST_A_M45PE20:
                case FLASH_5761VENDOR_ST_A_M45PE40:
                case FLASH_5761VENDOR_ST_A_M45PE80:
                case FLASH_5761VENDOR_ST_A_M45PE16:
                case FLASH_5761VENDOR_ST_M_M45PE20:
                case FLASH_5761VENDOR_ST_M_M45PE40:
                case FLASH_5761VENDOR_ST_M_M45PE80:
                case FLASH_5761VENDOR_ST_M_M45PE16:
                        tp->nvram_jedecnum = JEDEC_ST;
                        tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
                        tp->tg3_flags2 |= TG3_FLG2_FLASH;
                        tp->nvram_pagesize = 256;
                        break;
        }

        if (protect) {
                /* Usable size ends at the protected region's start. */
                tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
        } else {
                switch (nvcfg1) {
                        case FLASH_5761VENDOR_ATMEL_ADB161D:
                        case FLASH_5761VENDOR_ATMEL_MDB161D:
                        case FLASH_5761VENDOR_ST_A_M45PE16:
                        case FLASH_5761VENDOR_ST_M_M45PE16:
                                tp->nvram_size = 0x100000;
                                break;
                        case FLASH_5761VENDOR_ATMEL_ADB081D:
                        case FLASH_5761VENDOR_ATMEL_MDB081D:
                        case FLASH_5761VENDOR_ST_A_M45PE80:
                        case FLASH_5761VENDOR_ST_M_M45PE80:
                                tp->nvram_size = 0x80000;
                                break;
                        case FLASH_5761VENDOR_ATMEL_ADB041D:
                        case FLASH_5761VENDOR_ATMEL_MDB041D:
                        case FLASH_5761VENDOR_ST_A_M45PE40:
                        case FLASH_5761VENDOR_ST_M_M45PE40:
                                tp->nvram_size = 0x40000;
                                break;
                        case FLASH_5761VENDOR_ATMEL_ADB021D:
                        case FLASH_5761VENDOR_ATMEL_MDB021D:
                        case FLASH_5761VENDOR_ST_A_M45PE20:
                        case FLASH_5761VENDOR_ST_M_M45PE20:
                                tp->nvram_size = 0x20000;
                                break;
                }
        }
}
10047
10048 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
10049 {
10050         tp->nvram_jedecnum = JEDEC_ATMEL;
10051         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10052         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10053 }
10054
10055 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
10056 static void __devinit tg3_nvram_init(struct tg3 *tp)
10057 {
10058         tw32_f(GRC_EEPROM_ADDR,
10059              (EEPROM_ADDR_FSM_RESET |
10060               (EEPROM_DEFAULT_CLOCK_PERIOD <<
10061                EEPROM_ADDR_CLKPERD_SHIFT)));
10062
10063         msleep(1);
10064
10065         /* Enable seeprom accesses. */
10066         tw32_f(GRC_LOCAL_CTRL,
10067              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
10068         udelay(100);
10069
10070         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10071             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
10072                 tp->tg3_flags |= TG3_FLAG_NVRAM;
10073
10074                 if (tg3_nvram_lock(tp)) {
10075                         printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
10076                                "tg3_nvram_init failed.\n", tp->dev->name);
10077                         return;
10078                 }
10079                 tg3_enable_nvram_access(tp);
10080
10081                 tp->nvram_size = 0;
10082
10083                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10084                         tg3_get_5752_nvram_info(tp);
10085                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10086                         tg3_get_5755_nvram_info(tp);
10087                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10088                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
10089                         tg3_get_5787_nvram_info(tp);
10090                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
10091                         tg3_get_5761_nvram_info(tp);
10092                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10093                         tg3_get_5906_nvram_info(tp);
10094                 else
10095                         tg3_get_nvram_info(tp);
10096
10097                 if (tp->nvram_size == 0)
10098                         tg3_get_nvram_size(tp);
10099
10100                 tg3_disable_nvram_access(tp);
10101                 tg3_nvram_unlock(tp);
10102
10103         } else {
10104                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
10105
10106                 tg3_get_eeprom_size(tp);
10107         }
10108 }
10109
/* Read one 32-bit word from the legacy serial EEPROM via the GRC
 * EEPROM address/data registers.  @offset must be dword aligned and
 * within the addressable range; completion is polled for up to ~1s.
 * Returns 0 on success, -EINVAL for a bad offset, -EBUSY on timeout.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
                                        u32 offset, u32 *val)
{
        u32 tmp;
        int i;

        if (offset > EEPROM_ADDR_ADDR_MASK ||
            (offset % 4) != 0)
                return -EINVAL;

        /* Preserve unrelated bits; clear the address/devid/read fields
         * before programming the new read request.
         */
        tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
                                        EEPROM_ADDR_DEVID_MASK |
                                        EEPROM_ADDR_READ);
        tw32(GRC_EEPROM_ADDR,
             tmp |
             (0 << EEPROM_ADDR_DEVID_SHIFT) |
             ((offset << EEPROM_ADDR_ADDR_SHIFT) &
              EEPROM_ADDR_ADDR_MASK) |
             EEPROM_ADDR_READ | EEPROM_ADDR_START);

        /* Poll for the state machine to signal completion. */
        for (i = 0; i < 1000; i++) {
                tmp = tr32(GRC_EEPROM_ADDR);

                if (tmp & EEPROM_ADDR_COMPLETE)
                        break;
                msleep(1);
        }
        if (!(tmp & EEPROM_ADDR_COMPLETE))
                return -EBUSY;

        *val = tr32(GRC_EEPROM_DATA);
        return 0;
}
10143
10144 #define NVRAM_CMD_TIMEOUT 10000
10145
10146 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
10147 {
10148         int i;
10149
10150         tw32(NVRAM_CMD, nvram_cmd);
10151         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
10152                 udelay(10);
10153                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
10154                         udelay(10);
10155                         break;
10156                 }
10157         }
10158         if (i == NVRAM_CMD_TIMEOUT) {
10159                 return -EBUSY;
10160         }
10161         return 0;
10162 }
10163
10164 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
10165 {
10166         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10167             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10168             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10169            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10170             (tp->nvram_jedecnum == JEDEC_ATMEL))
10171
10172                 addr = ((addr / tp->nvram_pagesize) <<
10173                         ATMEL_AT45DB0X1B_PAGE_POS) +
10174                        (addr % tp->nvram_pagesize);
10175
10176         return addr;
10177 }
10178
10179 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
10180 {
10181         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10182             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10183             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10184            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10185             (tp->nvram_jedecnum == JEDEC_ATMEL))
10186
10187                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
10188                         tp->nvram_pagesize) +
10189                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
10190
10191         return addr;
10192 }
10193
/* Read one 32-bit word from NVRAM at byte @offset.  Falls back to the
 * legacy EEPROM interface on chips without an NVRAM block.  The data
 * register value is byte-swapped before being stored in *@val; *@val
 * is untouched on failure.  Returns 0 on success or negative errno.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
        int ret;

        if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
                return tg3_nvram_read_using_eeprom(tp, offset, val);

        /* Translate to the flash part's physical (page-based) address. */
        offset = tg3_nvram_phys_addr(tp, offset);

        if (offset > NVRAM_ADDR_MSK)
                return -EINVAL;

        ret = tg3_nvram_lock(tp);
        if (ret)
                return ret;

        tg3_enable_nvram_access(tp);

        tw32(NVRAM_ADDR, offset);
        ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
                NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

        if (ret == 0)
                *val = swab32(tr32(NVRAM_RDDATA));

        tg3_disable_nvram_access(tp);

        tg3_nvram_unlock(tp);

        return ret;
}
10225
10226 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val)
10227 {
10228         u32 v;
10229         int res = tg3_nvram_read(tp, offset, &v);
10230         if (!res)
10231                 *val = cpu_to_le32(v);
10232         return res;
10233 }
10234
10235 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
10236 {
10237         int err;
10238         u32 tmp;
10239
10240         err = tg3_nvram_read(tp, offset, &tmp);
10241         *val = swab32(tmp);
10242         return err;
10243 }
10244
/* Write @len bytes from @buf to the legacy serial EEPROM, one 32-bit
 * word at a time, polling up to ~1s per word.  Offset and length are
 * dword aligned per the callers' contract.  Returns 0 on success or
 * -EBUSY if a word write times out.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
                                    u32 offset, u32 len, u8 *buf)
{
        int i, j, rc = 0;
        u32 val;

        for (i = 0; i < len; i += 4) {
                u32 addr;
                __le32 data;

                addr = offset + i;

                memcpy(&data, buf + i, 4);

                /* Data goes out little-endian through the data register. */
                tw32(GRC_EEPROM_DATA, le32_to_cpu(data));

                val = tr32(GRC_EEPROM_ADDR);
                /* NOTE(review): writing COMPLETE back appears to acknowledge
                 * the previous operation before starting a new one - confirm
                 * against the chip's programming documentation.
                 */
                tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

                val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
                        EEPROM_ADDR_READ);
                tw32(GRC_EEPROM_ADDR, val |
                        (0 << EEPROM_ADDR_DEVID_SHIFT) |
                        (addr & EEPROM_ADDR_ADDR_MASK) |
                        EEPROM_ADDR_START |
                        EEPROM_ADDR_WRITE);

                /* Poll for this word's write to finish. */
                for (j = 0; j < 1000; j++) {
                        val = tr32(GRC_EEPROM_ADDR);

                        if (val & EEPROM_ADDR_COMPLETE)
                                break;
                        msleep(1);
                }
                if (!(val & EEPROM_ADDR_COMPLETE)) {
                        rc = -EBUSY;
                        break;
                }
        }

        return rc;
}
10287
/* offset and length are dword aligned */
/* Write to a flash part that has no on-chip write buffer: for every
 * page touched, read the whole page into a scratch buffer, merge in
 * the caller's data, erase the page, and program it back word by word.
 * Returns 0 on success or the first error encountered; a write-disable
 * command is always issued on the way out.
 */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	/* Scratch buffer holding one full flash page for the
	 * read-modify-write cycle.
	 */
	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		/* Start address of the page containing @offset. */
		phy_addr = offset & ~pagemask;

		/* Read back the entire page so the words we are not
		 * overwriting survive the erase below.
		 */
		for (j = 0; j < pagesize; j += 4) {
			if ((ret = tg3_nvram_read_le(tp, phy_addr + j,
						(__le32 *) (tmp + j))))
				break;
		}
		if (ret)
			break;

		/* Merge the caller's data into the page image. */
		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		memcpy(tmp + page_off, buf, size);

		/* Advance to the start of the next page. */
		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Program the merged page back one word at a time,
		 * framing the burst with FIRST on word 0 and LAST on
		 * the final word of the page.
		 */
		for (j = 0; j < pagesize; j += 4) {
			__be32 data;

			data = *((__be32 *) (tmp + j));
			/* swab32(le32_to_cpu(data)), actually */
			tw32(NVRAM_WRDATA, be32_to_cpu(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
				break;
		}
		if (ret)
			break;
	}

	/* Re-assert write-disable regardless of success or failure. */
	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}
10384
/* offset and length are dword aligned */
/* Write to a buffered flash part (or plain EEPROM) one 32-bit word at
 * a time; no page erase is needed.  Returns 0 on success or the first
 * error from tg3_nvram_exec_cmd().
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		/* Offset of this word within its flash page. */
		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		/* FIRST marks a word that begins a page or begins the
		 * whole transfer; LAST marks the end of a page or the
		 * end of the transfer.
		 */
		if ((page_off == 0) || (i == 0))
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* On pre-5752/5755/5787/5784/5761 chips with ST flash,
		 * a write-enable command precedes each FIRST word.
		 */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {

			if ((ret = tg3_nvram_exec_cmd(tp,
				NVRAM_CMD_WREN | NVRAM_CMD_GO |
				NVRAM_CMD_DONE)))

				break;
		}
		if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
			break;
	}
	return ret;
}
10438
10439 /* offset and length are dword aligned */
10440 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
10441 {
10442         int ret;
10443
10444         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
10445                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
10446                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
10447                 udelay(40);
10448         }
10449
10450         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
10451                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
10452         }
10453         else {
10454                 u32 grc_mode;
10455
10456                 ret = tg3_nvram_lock(tp);
10457                 if (ret)
10458                         return ret;
10459
10460                 tg3_enable_nvram_access(tp);
10461                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
10462                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
10463                         tw32(NVRAM_WRITE1, 0x406);
10464
10465                 grc_mode = tr32(GRC_MODE);
10466                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
10467
10468                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
10469                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
10470
10471                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
10472                                 buf);
10473                 }
10474                 else {
10475                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
10476                                 buf);
10477                 }
10478
10479                 grc_mode = tr32(GRC_MODE);
10480                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
10481
10482                 tg3_disable_nvram_access(tp);
10483                 tg3_nvram_unlock(tp);
10484         }
10485
10486         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
10487                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10488                 udelay(40);
10489         }
10490
10491         return ret;
10492 }
10493
/* Maps a PCI subsystem (vendor, device) pair to the PHY ID expected on
 * that board.  Consulted by tg3_phy_probe() when neither the MII ID
 * registers nor the EEPROM yield a usable PHY ID.
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;	/* PHY_ID_* constant; 0 => serdes board (see
			 * the !tp->phy_id test in tg3_phy_probe()) */
};
10498
/* Hardcoded subsystem-ID -> PHY ID table for boards whose EEPROM lacks
 * a PHY signature; looked up via lookup_by_subsys().  Entries with a
 * phy_id of 0 are serdes (fiber) boards.
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */

	/* 3com boards. */
	{ PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
	{ PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
	{ PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
	{ PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
	{ PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */

	/* DELL boards. */
	{ PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
	{ PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
	{ PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
	{ PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */

	/* Compaq boards. */
	{ PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
	{ PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
	{ PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
	{ PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
	{ PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */

	/* IBM boards. */
	{ PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
};
10536
10537 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
10538 {
10539         int i;
10540
10541         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
10542                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
10543                      tp->pdev->subsystem_vendor) &&
10544                     (subsys_id_to_phy_id[i].subsys_devid ==
10545                      tp->pdev->subsystem_device))
10546                         return &subsys_id_to_phy_id[i];
10547         }
10548         return NULL;
10549 }
10550
10551 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
10552 {
10553         u32 val;
10554         u16 pmcsr;
10555
10556         /* On some early chips the SRAM cannot be accessed in D3hot state,
10557          * so need make sure we're in D0.
10558          */
10559         pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
10560         pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10561         pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
10562         msleep(1);
10563
10564         /* Make sure register accesses (indirect or otherwise)
10565          * will function correctly.
10566          */
10567         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10568                                tp->misc_host_ctrl);
10569
10570         /* The memory arbiter has to be enabled in order for SRAM accesses
10571          * to succeed.  Normally on powerup the tg3 chip firmware will make
10572          * sure it is enabled, but other entities such as system netboot
10573          * code might disable it.
10574          */
10575         val = tr32(MEMARB_MODE);
10576         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
10577
10578         tp->phy_id = PHY_ID_INVALID;
10579         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10580
10581         /* Assume an onboard device and WOL capable by default.  */
10582         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
10583
10584         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10585                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
10586                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10587                         tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
10588                 }
10589                 val = tr32(VCPU_CFGSHDW);
10590                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
10591                         tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
10592                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
10593                     (val & VCPU_CFGSHDW_WOL_MAGPKT))
10594                         tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
10595                 return;
10596         }
10597
10598         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
10599         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
10600                 u32 nic_cfg, led_cfg;
10601                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
10602                 int eeprom_phy_serdes = 0;
10603
10604                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
10605                 tp->nic_sram_data_cfg = nic_cfg;
10606
10607                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
10608                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
10609                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
10610                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
10611                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
10612                     (ver > 0) && (ver < 0x100))
10613                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
10614
10615                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
10616                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
10617                         eeprom_phy_serdes = 1;
10618
10619                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
10620                 if (nic_phy_id != 0) {
10621                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
10622                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
10623
10624                         eeprom_phy_id  = (id1 >> 16) << 10;
10625                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
10626                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
10627                 } else
10628                         eeprom_phy_id = 0;
10629
10630                 tp->phy_id = eeprom_phy_id;
10631                 if (eeprom_phy_serdes) {
10632                         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
10633                                 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
10634                         else
10635                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
10636                 }
10637
10638                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10639                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
10640                                     SHASTA_EXT_LED_MODE_MASK);
10641                 else
10642                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
10643
10644                 switch (led_cfg) {
10645                 default:
10646                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
10647                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10648                         break;
10649
10650                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
10651                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
10652                         break;
10653
10654                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
10655                         tp->led_ctrl = LED_CTRL_MODE_MAC;
10656
10657                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
10658                          * read on some older 5700/5701 bootcode.
10659                          */
10660                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
10661                             ASIC_REV_5700 ||
10662                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
10663                             ASIC_REV_5701)
10664                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10665
10666                         break;
10667
10668                 case SHASTA_EXT_LED_SHARED:
10669                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
10670                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
10671                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
10672                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
10673                                                  LED_CTRL_MODE_PHY_2);
10674                         break;
10675
10676                 case SHASTA_EXT_LED_MAC:
10677                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
10678                         break;
10679
10680                 case SHASTA_EXT_LED_COMBO:
10681                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
10682                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
10683                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
10684                                                  LED_CTRL_MODE_PHY_2);
10685                         break;
10686
10687                 };
10688
10689                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10690                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
10691                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
10692                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
10693
10694                 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
10695                     tp->pci_chip_rev_id == CHIPREV_ID_5784_A1)
10696                         tp->led_ctrl = LED_CTRL_MODE_MAC;
10697
10698                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
10699                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
10700                         if ((tp->pdev->subsystem_vendor ==
10701                              PCI_VENDOR_ID_ARIMA) &&
10702                             (tp->pdev->subsystem_device == 0x205a ||
10703                              tp->pdev->subsystem_device == 0x2063))
10704                                 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10705                 } else {
10706                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10707                         tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
10708                 }
10709
10710                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
10711                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
10712                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10713                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
10714                 }
10715                 if (nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE)
10716                         tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
10717                 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
10718                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
10719                         tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
10720
10721                 if (tp->tg3_flags & TG3_FLAG_WOL_CAP &&
10722                     nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)
10723                         tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
10724
10725                 if (cfg2 & (1 << 17))
10726                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
10727
10728                 /* serdes signal pre-emphasis in register 0x590 set by */
10729                 /* bootcode if bit 18 is set */
10730                 if (cfg2 & (1 << 18))
10731                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
10732
10733                 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10734                         u32 cfg3;
10735
10736                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
10737                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
10738                                 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
10739                 }
10740         }
10741 }
10742
/* Identify the PHY attached to this device and program a sane initial
 * advertisement.  PHY ID sources, in order of preference: the MII ID
 * registers (skipped when ASF/APE firmware owns the PHY), the value
 * tg3_get_eeprom_hw_cfg() already stored in tp->phy_id, and finally
 * the hardcoded subsystem-ID table.  Returns 0 on success, -ENODEV if
 * no PHY ID could be determined, or a PHY reset/DSP init error.
 */
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
	    (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		/* Same packed layout tg3_get_eeprom_hw_cfg() builds. */
		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
	}

	if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == PHY_ID_BCM8002)
			tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		else
			tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
	} else {
		if (tp->phy_id != PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			/* Table entries with phy_id 0 mark serdes. */
			if (!tp->phy_id ||
			    tp->phy_id == PHY_ID_BCM8002)
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}
	}

	/* Copper PHY fully under driver control: if the link is not
	 * already up, reset the PHY and advertise everything it can do.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
	    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		u32 bmsr, adv_reg, tg3_ctrl, mask;

		/* BMSR latches link-down; read twice for current state. */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_100HALF | ADVERTISE_100FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		tg3_ctrl = 0;
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
			tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
				    MII_TG3_CTRL_ADV_1000_FULL);
			/* Early 5701 steppings must be forced to be the
			 * master in gigabit mode.
			 */
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
					     MII_TG3_CTRL_ENABLE_AS_MASTER);
		}

		/* Restart autoneg only if the PHY is not already
		 * advertising the full set of modes.
		 */
		mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
		if (!tg3_copper_is_advertising_all(tp, mask)) {
			tg3_writephy(tp, MII_ADVERTISE, adv_reg);

			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
				tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
		tg3_phy_set_wirespeed(tp);

		tg3_writephy(tp, MII_ADVERTISE, adv_reg);
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
			tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
	}

skip_phy_reset:
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;
	}

	/* NOTE(review): on success this repeats the 5401 DSP init done
	 * just above.  Looks redundant -- confirm whether the bootcode/
	 * DSP actually requires a second pass before removing.
	 */
	if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
		err = tg3_init_5401phy_dsp(tp);
	}

	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
		tp->link_config.advertising =
			(ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg |
			 ADVERTISED_FIBRE);
	if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
		tp->link_config.advertising &=
			~(ADVERTISED_1000baseT_Half |
			  ADVERTISED_1000baseT_Full);

	return err;
}
10870
10871 static void __devinit tg3_read_partno(struct tg3 *tp)
10872 {
10873         unsigned char vpd_data[256];
10874         unsigned int i;
10875         u32 magic;
10876
10877         if (tg3_nvram_read_swab(tp, 0x0, &magic))
10878                 goto out_not_found;
10879
10880         if (magic == TG3_EEPROM_MAGIC) {
10881                 for (i = 0; i < 256; i += 4) {
10882                         u32 tmp;
10883
10884                         if (tg3_nvram_read(tp, 0x100 + i, &tmp))
10885                                 goto out_not_found;
10886
10887                         vpd_data[i + 0] = ((tmp >>  0) & 0xff);
10888                         vpd_data[i + 1] = ((tmp >>  8) & 0xff);
10889                         vpd_data[i + 2] = ((tmp >> 16) & 0xff);
10890                         vpd_data[i + 3] = ((tmp >> 24) & 0xff);
10891                 }
10892         } else {
10893                 int vpd_cap;
10894
10895                 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
10896                 for (i = 0; i < 256; i += 4) {
10897                         u32 tmp, j = 0;
10898                         __le32 v;
10899                         u16 tmp16;
10900
10901                         pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
10902                                               i);
10903                         while (j++ < 100) {
10904                                 pci_read_config_word(tp->pdev, vpd_cap +
10905                                                      PCI_VPD_ADDR, &tmp16);
10906                                 if (tmp16 & 0x8000)
10907                                         break;
10908                                 msleep(1);
10909                         }
10910                         if (!(tmp16 & 0x8000))
10911                                 goto out_not_found;
10912
10913                         pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
10914                                               &tmp);
10915                         v = cpu_to_le32(tmp);
10916                         memcpy(&vpd_data[i], &v, 4);
10917                 }
10918         }
10919
10920         /* Now parse and find the part number. */
10921         for (i = 0; i < 254; ) {
10922                 unsigned char val = vpd_data[i];
10923                 unsigned int block_end;
10924
10925                 if (val == 0x82 || val == 0x91) {
10926                         i = (i + 3 +
10927                              (vpd_data[i + 1] +
10928                               (vpd_data[i + 2] << 8)));
10929                         continue;
10930                 }
10931
10932                 if (val != 0x90)
10933                         goto out_not_found;
10934
10935                 block_end = (i + 3 +
10936                              (vpd_data[i + 1] +
10937                               (vpd_data[i + 2] << 8)));
10938                 i += 3;
10939
10940                 if (block_end > 256)
10941                         goto out_not_found;
10942
10943                 while (i < (block_end - 2)) {
10944                         if (vpd_data[i + 0] == 'P' &&
10945                             vpd_data[i + 1] == 'N') {
10946                                 int partno_len = vpd_data[i + 2];
10947
10948                                 i += 3;
10949                                 if (partno_len > 24 || (partno_len + i) > 256)
10950                                         goto out_not_found;
10951
10952                                 memcpy(tp->board_part_number,
10953                                        &vpd_data[i], partno_len);
10954
10955                                 /* Success. */
10956                                 return;
10957                         }
10958                         i += 3 + vpd_data[i + 2];
10959                 }
10960
10961                 /* Part number not found. */
10962                 goto out_not_found;
10963         }
10964
10965 out_not_found:
10966         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10967                 strcpy(tp->board_part_number, "BCM95906");
10968         else
10969                 strcpy(tp->board_part_number, "none");
10970 }
10971
10972 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
10973 {
10974         u32 val;
10975
10976         if (tg3_nvram_read_swab(tp, offset, &val) ||
10977             (val & 0xfc000000) != 0x0c000000 ||
10978             tg3_nvram_read_swab(tp, offset + 4, &val) ||
10979             val != 0)
10980                 return 0;
10981
10982         return 1;
10983 }
10984
/* tg3_read_fw_ver - read the bootcode firmware version string (and,
 * when ASF management firmware is active, the ASF version as well)
 * out of NVRAM into tp->fw_ver.
 *
 * NVRAM layout used here:
 *   word 0x0       - TG3_EEPROM_MAGIC, else the image is not ours
 *   word 0x4       - firmware load address ("start")
 *   word 0xc       - pointer to the bootcode firmware image
 *   image + 0x8    - load-address-relative offset of the version text
 *
 * Any NVRAM read failure or validation failure aborts silently,
 * leaving tp->fw_ver with whatever has been copied so far.
 */
static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	u32 ver_offset;
	int i, bcnt;

	/* Bail out unless NVRAM carries the standard tg3 image magic. */
	if (tg3_nvram_read_swab(tp, 0, &val))
		return;

	if (val != TG3_EEPROM_MAGIC)
		return;

	/* 0xc holds the bootcode image pointer, 0x4 the load address. */
	if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
	    tg3_nvram_read_swab(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	/* Validate the image header; the version string offset is at
	 * image + 8, relative to the firmware load address.
	 */
	if (!tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
		return;

	/* Convert the load-address-relative offset into an NVRAM offset
	 * and copy up to 16 bytes of version string, a word at a time.
	 */
	offset = offset + ver_offset - start;
	for (i = 0; i < 16; i += 4) {
		__le32 v;
		if (tg3_nvram_read_le(tp, offset + i, &v))
			return;

		memcpy(tp->fw_ver + i, &v, 4);
	}

	/* Only append the ASF firmware version when ASF is enabled and
	 * the device is not being managed by the APE.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
	     (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return;

	/* Walk the NVRAM directory looking for the ASF INI entry. */
	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read_swab(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	/* Loop ran off the end: no ASF entry present. */
	if (offset == TG3_NVM_DIR_END)
		return;

	/* Pre-5705 devices use a fixed ASF load address; later devices
	 * store it in the word preceding the directory entry.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read_swab(tp, offset - 4, &start))
		return;

	/* Directory entry + 4 points at the ASF image; validate it and
	 * fetch its version-string offset (image + 8).
	 */
	if (tg3_nvram_read_swab(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read_swab(tp, offset + 8, &val))
		return;

	offset += val - start;

	/* Append ", " plus up to 16 bytes of the ASF version string,
	 * truncating so the result always fits within fw_ver.
	 */
	bcnt = strlen(tp->fw_ver);

	tp->fw_ver[bcnt++] = ',';
	tp->fw_ver[bcnt++] = ' ';

	for (i = 0; i < 4; i++) {
		__le32 v;
		if (tg3_nvram_read_le(tp, offset, &v))
			return;

		offset += sizeof(v);

		/* Not enough room for a whole word: copy the part that
		 * fits and stop.
		 */
		if (bcnt > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[bcnt], &v, TG3_VER_SIZE - bcnt);
			break;
		}

		memcpy(&tp->fw_ver[bcnt], &v, sizeof(v));
		bcnt += sizeof(v);
	}

	/* Guarantee NUL termination regardless of how much was copied. */
	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
11068
11069 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
11070
11071 static int __devinit tg3_get_invariants(struct tg3 *tp)
11072 {
11073         static struct pci_device_id write_reorder_chipsets[] = {
11074                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11075                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
11076                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11077                              PCI_DEVICE_ID_AMD_8131_BRIDGE) },
11078                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
11079                              PCI_DEVICE_ID_VIA_8385_0) },
11080                 { },
11081         };
11082         u32 misc_ctrl_reg;
11083         u32 cacheline_sz_reg;
11084         u32 pci_state_reg, grc_misc_cfg;
11085         u32 val;
11086         u16 pci_cmd;
11087         int err, pcie_cap;
11088
11089         /* Force memory write invalidate off.  If we leave it on,
11090          * then on 5700_BX chips we have to enable a workaround.
11091          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
11092          * to match the cacheline size.  The Broadcom driver have this
11093          * workaround but turns MWI off all the times so never uses
11094          * it.  This seems to suggest that the workaround is insufficient.
11095          */
11096         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11097         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
11098         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11099
11100         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
11101          * has the register indirect write enable bit set before
11102          * we try to access any of the MMIO registers.  It is also
11103          * critical that the PCI-X hw workaround situation is decided
11104          * before that as well.
11105          */
11106         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11107                               &misc_ctrl_reg);
11108
11109         tp->pci_chip_rev_id = (misc_ctrl_reg >>
11110                                MISC_HOST_CTRL_CHIPREV_SHIFT);
11111         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
11112                 u32 prod_id_asic_rev;
11113
11114                 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
11115                                       &prod_id_asic_rev);
11116                 tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
11117         }
11118
11119         /* Wrong chip ID in 5752 A0. This code can be removed later
11120          * as A0 is not in production.
11121          */
11122         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
11123                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
11124
11125         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
11126          * we need to disable memory and use config. cycles
11127          * only to access all registers. The 5702/03 chips
11128          * can mistakenly decode the special cycles from the
11129          * ICH chipsets as memory write cycles, causing corruption
11130          * of register and memory space. Only certain ICH bridges
11131          * will drive special cycles with non-zero data during the
11132          * address phase which can fall within the 5703's address
11133          * range. This is not an ICH bug as the PCI spec allows
11134          * non-zero address during special cycles. However, only
11135          * these ICH bridges are known to drive non-zero addresses
11136          * during special cycles.
11137          *
11138          * Since special cycles do not cross PCI bridges, we only
11139          * enable this workaround if the 5703 is on the secondary
11140          * bus of these ICH bridges.
11141          */
11142         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
11143             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
11144                 static struct tg3_dev_id {
11145                         u32     vendor;
11146                         u32     device;
11147                         u32     rev;
11148                 } ich_chipsets[] = {
11149                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
11150                           PCI_ANY_ID },
11151                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
11152                           PCI_ANY_ID },
11153                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
11154                           0xa },
11155                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
11156                           PCI_ANY_ID },
11157                         { },
11158                 };
11159                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
11160                 struct pci_dev *bridge = NULL;
11161
11162                 while (pci_id->vendor != 0) {
11163                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
11164                                                 bridge);
11165                         if (!bridge) {
11166                                 pci_id++;
11167                                 continue;
11168                         }
11169                         if (pci_id->rev != PCI_ANY_ID) {
11170                                 if (bridge->revision > pci_id->rev)
11171                                         continue;
11172                         }
11173                         if (bridge->subordinate &&
11174                             (bridge->subordinate->number ==
11175                              tp->pdev->bus->number)) {
11176
11177                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
11178                                 pci_dev_put(bridge);
11179                                 break;
11180                         }
11181                 }
11182         }
11183
11184         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
11185          * DMA addresses > 40-bit. This bridge may have other additional
11186          * 57xx devices behind it in some 4-port NIC designs for example.
11187          * Any tg3 device found behind the bridge will also need the 40-bit
11188          * DMA workaround.
11189          */
11190         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11191             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
11192                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
11193                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
11194                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
11195         }
11196         else {
11197                 struct pci_dev *bridge = NULL;
11198
11199                 do {
11200                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
11201                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
11202                                                 bridge);
11203                         if (bridge && bridge->subordinate &&
11204                             (bridge->subordinate->number <=
11205                              tp->pdev->bus->number) &&
11206                             (bridge->subordinate->subordinate >=
11207                              tp->pdev->bus->number)) {
11208                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
11209                                 pci_dev_put(bridge);
11210                                 break;
11211                         }
11212                 } while (bridge);
11213         }
11214
11215         /* Initialize misc host control in PCI block. */
11216         tp->misc_host_ctrl |= (misc_ctrl_reg &
11217                                MISC_HOST_CTRL_CHIPREV);
11218         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11219                                tp->misc_host_ctrl);
11220
11221         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
11222                               &cacheline_sz_reg);
11223
11224         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
11225         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
11226         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
11227         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
11228
11229         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11230             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
11231                 tp->pdev_peer = tg3_find_peer(tp);
11232
11233         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11234             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
11235             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11236             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11237             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11238             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11239             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
11240             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11241                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
11242
11243         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
11244             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
11245                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
11246
11247         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
11248                 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
11249                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
11250                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
11251                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
11252                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
11253                      tp->pdev_peer == tp->pdev))
11254                         tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
11255
11256                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11257                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11258                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11259                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11260                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11261                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
11262                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
11263                 } else {
11264                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
11265                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
11266                                 ASIC_REV_5750 &&
11267                             tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
11268                                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
11269                 }
11270         }
11271
11272         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
11273             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
11274             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
11275             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
11276             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787 &&
11277             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
11278             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 &&
11279             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
11280                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
11281
11282         pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
11283         if (pcie_cap != 0) {
11284                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
11285
11286                 pcie_set_readrq(tp->pdev, 4096);
11287
11288                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11289                         u16 lnkctl;
11290
11291                         pci_read_config_word(tp->pdev,
11292                                              pcie_cap + PCI_EXP_LNKCTL,
11293                                              &lnkctl);
11294                         if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
11295                                 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
11296                 }
11297         }
11298
11299         /* If we have an AMD 762 or VIA K8T800 chipset, write
11300          * reordering to the mailbox registers done by the host
11301          * controller can cause major troubles.  We read back from
11302          * every mailbox register write to force the writes to be
11303          * posted to the chip in order.
11304          */
11305         if (pci_dev_present(write_reorder_chipsets) &&
11306             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
11307                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
11308
11309         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11310             tp->pci_lat_timer < 64) {
11311                 tp->pci_lat_timer = 64;
11312
11313                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
11314                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
11315                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
11316                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
11317
11318                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
11319                                        cacheline_sz_reg);
11320         }
11321
11322         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
11323             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11324                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
11325                 if (!tp->pcix_cap) {
11326                         printk(KERN_ERR PFX "Cannot find PCI-X "
11327                                             "capability, aborting.\n");
11328                         return -EIO;
11329                 }
11330         }
11331
11332         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11333                               &pci_state_reg);
11334
11335         if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
11336                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
11337
11338                 /* If this is a 5700 BX chipset, and we are in PCI-X
11339                  * mode, enable register write workaround.
11340                  *
11341                  * The workaround is to use indirect register accesses
11342                  * for all chip writes not to mailbox registers.
11343                  */
11344                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
11345                         u32 pm_reg;
11346
11347                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
11348
11349                         /* The chip can have it's power management PCI config
11350                          * space registers clobbered due to this bug.
11351                          * So explicitly force the chip into D0 here.
11352                          */
11353                         pci_read_config_dword(tp->pdev,
11354                                               tp->pm_cap + PCI_PM_CTRL,
11355                                               &pm_reg);
11356                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
11357                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
11358                         pci_write_config_dword(tp->pdev,
11359                                                tp->pm_cap + PCI_PM_CTRL,
11360                                                pm_reg);
11361
11362                         /* Also, force SERR#/PERR# in PCI command. */
11363                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11364                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
11365                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11366                 }
11367         }
11368
11369         /* 5700 BX chips need to have their TX producer index mailboxes
11370          * written twice to workaround a bug.
11371          */
11372         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
11373                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
11374
11375         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
11376                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
11377         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
11378                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
11379
11380         /* Chip-specific fixup from Broadcom driver */
11381         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
11382             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
11383                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
11384                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
11385         }
11386
11387         /* Default fast path register access methods */
11388         tp->read32 = tg3_read32;
11389         tp->write32 = tg3_write32;
11390         tp->read32_mbox = tg3_read32;
11391         tp->write32_mbox = tg3_write32;
11392         tp->write32_tx_mbox = tg3_write32;
11393         tp->write32_rx_mbox = tg3_write32;
11394
11395         /* Various workaround register access methods */
11396         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
11397                 tp->write32 = tg3_write_indirect_reg32;
11398         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
11399                  ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
11400                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
11401                 /*
11402                  * Back to back register writes can cause problems on these
11403                  * chips, the workaround is to read back all reg writes
11404                  * except those to mailbox regs.
11405                  *
11406                  * See tg3_write_indirect_reg32().
11407                  */
11408                 tp->write32 = tg3_write_flush_reg32;
11409         }
11410
11411
11412         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
11413             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
11414                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11415                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
11416                         tp->write32_rx_mbox = tg3_write_flush_reg32;
11417         }
11418
11419         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
11420                 tp->read32 = tg3_read_indirect_reg32;
11421                 tp->write32 = tg3_write_indirect_reg32;
11422                 tp->read32_mbox = tg3_read_indirect_mbox;
11423                 tp->write32_mbox = tg3_write_indirect_mbox;
11424                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
11425                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
11426
11427                 iounmap(tp->regs);
11428                 tp->regs = NULL;
11429
11430                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11431                 pci_cmd &= ~PCI_COMMAND_MEMORY;
11432                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11433         }
11434         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11435                 tp->read32_mbox = tg3_read32_mbox_5906;
11436                 tp->write32_mbox = tg3_write32_mbox_5906;
11437                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
11438                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
11439         }
11440
11441         if (tp->write32 == tg3_write_indirect_reg32 ||
11442             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
11443              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11444               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
11445                 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
11446
11447         /* Get eeprom hw config before calling tg3_set_power_state().
11448          * In particular, the TG3_FLG2_IS_NIC flag must be
11449          * determined before calling tg3_set_power_state() so that
11450          * we know whether or not to switch out of Vaux power.
11451          * When the flag is set, it means that GPIO1 is used for eeprom
11452          * write protect and also implies that it is a LOM where GPIOs
11453          * are not used to switch power.
11454          */
11455         tg3_get_eeprom_hw_cfg(tp);
11456
11457         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
11458                 /* Allow reads and writes to the
11459                  * APE register and memory space.
11460                  */
11461                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
11462                                  PCISTATE_ALLOW_APE_SHMEM_WR;
11463                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
11464                                        pci_state_reg);
11465         }
11466
11467         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11468             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
11469                 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
11470
11471                 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
11472                     tp->pci_chip_rev_id == CHIPREV_ID_5784_A1 ||
11473                     tp->pci_chip_rev_id == CHIPREV_ID_5761_A0 ||
11474                     tp->pci_chip_rev_id == CHIPREV_ID_5761_A1)
11475                         tp->tg3_flags3 |= TG3_FLG3_5761_5784_AX_FIXES;
11476         }
11477
11478         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
11479          * GPIO1 driven high will bring 5700's external PHY out of reset.
11480          * It is also used as eeprom write protect on LOMs.
11481          */
11482         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
11483         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
11484             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
11485                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
11486                                        GRC_LCLCTRL_GPIO_OUTPUT1);
11487         /* Unused GPIO3 must be driven as output on 5752 because there
11488          * are no pull-up resistors on unused GPIO pins.
11489          */
11490         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
11491                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
11492
11493         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11494                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
11495
11496         /* Force the chip into D0. */
11497         err = tg3_set_power_state(tp, PCI_D0);
11498         if (err) {
11499                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
11500                        pci_name(tp->pdev));
11501                 return err;
11502         }
11503
11504         /* 5700 B0 chips do not support checksumming correctly due
11505          * to hardware bugs.
11506          */
11507         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
11508                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
11509
11510         /* Derive initial jumbo mode from MTU assigned in
11511          * ether_setup() via the alloc_etherdev() call
11512          */
11513         if (tp->dev->mtu > ETH_DATA_LEN &&
11514             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11515                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
11516
11517         /* Determine WakeOnLan speed to use. */
11518         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11519             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
11520             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
11521             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
11522                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
11523         } else {
11524                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
11525         }
11526
11527         /* A few boards don't want Ethernet@WireSpeed phy feature */
11528         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
11529             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
11530              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
11531              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
11532             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
11533             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
11534                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
11535
11536         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
11537             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
11538                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
11539         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
11540                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
11541
11542         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11543                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11544                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11545                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11546                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
11547                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
11548                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
11549                                 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
11550                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
11551                                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
11552                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
11553                         tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
11554         }
11555
11556         tp->coalesce_mode = 0;
11557         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
11558             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
11559                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
11560
11561         /* Initialize MAC MI mode, polling disabled. */
11562         tw32_f(MAC_MI_MODE, tp->mi_mode);
11563         udelay(80);
11564
11565         /* Initialize data/descriptor byte/word swapping. */
11566         val = tr32(GRC_MODE);
11567         val &= GRC_MODE_HOST_STACKUP;
11568         tw32(GRC_MODE, val | tp->grc_mode);
11569
11570         tg3_switch_clocks(tp);
11571
11572         /* Clear this out for sanity. */
11573         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
11574
11575         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11576                               &pci_state_reg);
11577         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
11578             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
11579                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
11580
11581                 if (chiprevid == CHIPREV_ID_5701_A0 ||
11582                     chiprevid == CHIPREV_ID_5701_B0 ||
11583                     chiprevid == CHIPREV_ID_5701_B2 ||
11584                     chiprevid == CHIPREV_ID_5701_B5) {
11585                         void __iomem *sram_base;
11586
11587                         /* Write some dummy words into the SRAM status block
11588                          * area, see if it reads back correctly.  If the return
11589                          * value is bad, force enable the PCIX workaround.
11590                          */
11591                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
11592
11593                         writel(0x00000000, sram_base);
11594                         writel(0x00000000, sram_base + 4);
11595                         writel(0xffffffff, sram_base + 4);
11596                         if (readl(sram_base) != 0x00000000)
11597                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
11598                 }
11599         }
11600
11601         udelay(50);
11602         tg3_nvram_init(tp);
11603
11604         grc_misc_cfg = tr32(GRC_MISC_CFG);
11605         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
11606
11607         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
11608             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
11609              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
11610                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
11611
11612         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
11613             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
11614                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
11615         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
11616                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
11617                                       HOSTCC_MODE_CLRTICK_TXBD);
11618
11619                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
11620                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11621                                        tp->misc_host_ctrl);
11622         }
11623
11624         /* these are limited to 10/100 only */
11625         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11626              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
11627             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
11628              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
11629              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
11630               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
11631               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
11632             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
11633              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
11634               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
11635               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
11636             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11637                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
11638
11639         err = tg3_phy_probe(tp);
11640         if (err) {
11641                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
11642                        pci_name(tp->pdev), err);
11643                 /* ... but do not return immediately ... */
11644         }
11645
11646         tg3_read_partno(tp);
11647         tg3_read_fw_ver(tp);
11648
11649         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
11650                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
11651         } else {
11652                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
11653                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
11654                 else
11655                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
11656         }
11657
11658         /* 5700 {AX,BX} chips have a broken status block link
11659          * change bit implementation, so we must use the
11660          * status register in those cases.
11661          */
11662         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
11663                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
11664         else
11665                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
11666
11667         /* The led_ctrl is set during tg3_phy_probe, here we might
11668          * have to force the link status polling mechanism based
11669          * upon subsystem IDs.
11670          */
11671         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
11672             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
11673             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
11674                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
11675                                   TG3_FLAG_USE_LINKCHG_REG);
11676         }
11677
11678         /* For all SERDES we poll the MAC status register. */
11679         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
11680                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
11681         else
11682                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
11683
11684         /* All chips before 5787 can get confused if TX buffers
11685          * straddle the 4GB address boundary in some cases.
11686          */
11687         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11688             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11689             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11690             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11691             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11692                 tp->dev->hard_start_xmit = tg3_start_xmit;
11693         else
11694                 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
11695
11696         tp->rx_offset = 2;
11697         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
11698             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
11699                 tp->rx_offset = 0;
11700
11701         tp->rx_std_max_post = TG3_RX_RING_SIZE;
11702
11703         /* Increment the rx prod index on the rx std ring by at most
11704          * 8 for these chips to workaround hw errata.
11705          */
11706         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11707             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
11708             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11709                 tp->rx_std_max_post = 8;
11710
11711         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
11712                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
11713                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
11714
11715         return err;
11716 }
11717
11718 #ifdef CONFIG_SPARC
11719 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
11720 {
11721         struct net_device *dev = tp->dev;
11722         struct pci_dev *pdev = tp->pdev;
11723         struct device_node *dp = pci_device_to_OF_node(pdev);
11724         const unsigned char *addr;
11725         int len;
11726
11727         addr = of_get_property(dp, "local-mac-address", &len);
11728         if (addr && len == 6) {
11729                 memcpy(dev->dev_addr, addr, 6);
11730                 memcpy(dev->perm_addr, dev->dev_addr, 6);
11731                 return 0;
11732         }
11733         return -ENODEV;
11734 }
11735
11736 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
11737 {
11738         struct net_device *dev = tp->dev;
11739
11740         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
11741         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
11742         return 0;
11743 }
11744 #endif
11745
11746 static int __devinit tg3_get_device_address(struct tg3 *tp)
11747 {
11748         struct net_device *dev = tp->dev;
11749         u32 hi, lo, mac_offset;
11750         int addr_ok = 0;
11751
11752 #ifdef CONFIG_SPARC
11753         if (!tg3_get_macaddr_sparc(tp))
11754                 return 0;
11755 #endif
11756
11757         mac_offset = 0x7c;
11758         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11759             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11760                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
11761                         mac_offset = 0xcc;
11762                 if (tg3_nvram_lock(tp))
11763                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
11764                 else
11765                         tg3_nvram_unlock(tp);
11766         }
11767         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11768                 mac_offset = 0x10;
11769
11770         /* First try to get it from MAC address mailbox. */
11771         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
11772         if ((hi >> 16) == 0x484b) {
11773                 dev->dev_addr[0] = (hi >>  8) & 0xff;
11774                 dev->dev_addr[1] = (hi >>  0) & 0xff;
11775
11776                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
11777                 dev->dev_addr[2] = (lo >> 24) & 0xff;
11778                 dev->dev_addr[3] = (lo >> 16) & 0xff;
11779                 dev->dev_addr[4] = (lo >>  8) & 0xff;
11780                 dev->dev_addr[5] = (lo >>  0) & 0xff;
11781
11782                 /* Some old bootcode may report a 0 MAC address in SRAM */
11783                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
11784         }
11785         if (!addr_ok) {
11786                 /* Next, try NVRAM. */
11787                 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
11788                     !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
11789                         dev->dev_addr[0] = ((hi >> 16) & 0xff);
11790                         dev->dev_addr[1] = ((hi >> 24) & 0xff);
11791                         dev->dev_addr[2] = ((lo >>  0) & 0xff);
11792                         dev->dev_addr[3] = ((lo >>  8) & 0xff);
11793                         dev->dev_addr[4] = ((lo >> 16) & 0xff);
11794                         dev->dev_addr[5] = ((lo >> 24) & 0xff);
11795                 }
11796                 /* Finally just fetch it out of the MAC control regs. */
11797                 else {
11798                         hi = tr32(MAC_ADDR_0_HIGH);
11799                         lo = tr32(MAC_ADDR_0_LOW);
11800
11801                         dev->dev_addr[5] = lo & 0xff;
11802                         dev->dev_addr[4] = (lo >> 8) & 0xff;
11803                         dev->dev_addr[3] = (lo >> 16) & 0xff;
11804                         dev->dev_addr[2] = (lo >> 24) & 0xff;
11805                         dev->dev_addr[1] = hi & 0xff;
11806                         dev->dev_addr[0] = (hi >> 8) & 0xff;
11807                 }
11808         }
11809
11810         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
11811 #ifdef CONFIG_SPARC64
11812                 if (!tg3_get_default_macaddr_sparc(tp))
11813                         return 0;
11814 #endif
11815                 return -EINVAL;
11816         }
11817         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
11818         return 0;
11819 }
11820
11821 #define BOUNDARY_SINGLE_CACHELINE       1
11822 #define BOUNDARY_MULTI_CACHELINE        2
11823
/* Fold DMA read/write boundary settings into the DMA_RW_CTRL value @val,
 * based on the PCI cache line size and the host architecture's preference
 * for bursting across cache-line boundaries.  Returns the updated value.
 */
static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	/* PCI_CACHE_LINE_SIZE is in 32-bit words; 0 means unset, in which
	 * case a large (1024-byte) value is assumed.
	 */
	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
		goto out;

	/* Architecture-specific boundary policy: some platforms prefer
	 * bursts confined to one cache line, some tolerate multiples,
	 * others (goal == 0) need no restriction at all.
	 */
#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
		/* PCI-X: pick 128- or 384-byte boundaries depending on goal
		 * and cache line size.
		 */
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		};
	} else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* PCI Express: only the write boundary is controllable. */
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		};
	} else {
		/* Conventional PCI: cases fall through until either the
		 * single-cacheline goal matches the cache line size, or a
		 * multi-cacheline boundary is chosen.
		 */
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		};
	}

out:
	return val;
}
11960
/* Run one host<->NIC DMA transfer of @size bytes between @buf (at bus
 * address @buf_dma) and NIC-local memory, using an internal DMA descriptor
 * written into NIC SRAM via the PCI memory window.  @to_device selects
 * the direction: nonzero uses the read-DMA engine (host -> NIC), zero the
 * write-DMA engine (NIC -> host).
 *
 * Returns 0 when the completion FIFO reports the descriptor within the
 * polling window, -ENODEV on timeout.
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Clear completion FIFOs and DMA engine status before the test. */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	/* Host-side address of the test buffer; nic_mbuf is the NIC-local
	 * buffer address (the 0x2100 region checked in the disabled
	 * verification code in the caller).
	 */
	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		/* cqid_sqid encodes the completion/submission queue ids for
		 * this direction -- values per hardware convention, TODO:
		 * confirm against Broadcom documentation.
		 */
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor word-by-word into NIC SRAM through the PCI
	 * config-space memory window.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Kick the DMA engine by enqueueing the descriptor address. */
	if (to_device) {
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	} else {
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
	}

	/* Poll the completion FIFO for up to 40 * 100us = 4ms. */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
12041
12042 #define TEST_BUFFER_SIZE        0x2000
12043
/* Configure the DMA read/write control register (tp->dma_rwctrl) for this
 * chip/bus combination, then — on 5700/5701 only — run a loopback DMA test
 * to detect the write-DMA boundary bug, tightening the write boundary to
 * 16 bytes if corruption is observed.
 *
 * Returns 0 on success, -ENOMEM if the test buffer cannot be allocated,
 * or -ENODEV if DMA fails even with the workaround applied.
 */
static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret;

	buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	/* Base PCI read/write command codes for DMA_RW_CTRL. */
	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	/* Chip/bus-specific watermark bits.  The hex constants below are
	 * hardware-validated magic values for the respective chips.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	/* 5703/5704: clear the low nibble (minimum DMA size bits). */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants.  */
	tg3_switch_clocks(tp);
#endif

	/* The loopback DMA test below is only needed on 5700/5701. */
	ret = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	/* Write a known pattern to the chip and read it back; on
	 * corruption, retry once with the 16-byte write boundary
	 * workaround enabled.
	 */
	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);

			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				/* First failure: apply the 16-byte write
				 * boundary workaround and rerun the test.
				 */
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		static struct pci_device_id dma_wait_state_chipsets[] = {
			{ PCI_DEVICE(PCI_VENDOR_ID_APPLE,
				     PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
			{ },
		};

		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		}
		else
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
12230
12231 static void __devinit tg3_init_link_config(struct tg3 *tp)
12232 {
12233         tp->link_config.advertising =
12234                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
12235                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
12236                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
12237                  ADVERTISED_Autoneg | ADVERTISED_MII);
12238         tp->link_config.speed = SPEED_INVALID;
12239         tp->link_config.duplex = DUPLEX_INVALID;
12240         tp->link_config.autoneg = AUTONEG_ENABLE;
12241         tp->link_config.active_speed = SPEED_INVALID;
12242         tp->link_config.active_duplex = DUPLEX_INVALID;
12243         tp->link_config.phy_is_low_power = 0;
12244         tp->link_config.orig_speed = SPEED_INVALID;
12245         tp->link_config.orig_duplex = DUPLEX_INVALID;
12246         tp->link_config.orig_autoneg = AUTONEG_INVALID;
12247 }
12248
12249 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
12250 {
12251         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12252                 tp->bufmgr_config.mbuf_read_dma_low_water =
12253                         DEFAULT_MB_RDMA_LOW_WATER_5705;
12254                 tp->bufmgr_config.mbuf_mac_rx_low_water =
12255                         DEFAULT_MB_MACRX_LOW_WATER_5705;
12256                 tp->bufmgr_config.mbuf_high_water =
12257                         DEFAULT_MB_HIGH_WATER_5705;
12258                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12259                         tp->bufmgr_config.mbuf_mac_rx_low_water =
12260                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
12261                         tp->bufmgr_config.mbuf_high_water =
12262                                 DEFAULT_MB_HIGH_WATER_5906;
12263                 }
12264
12265                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
12266                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
12267                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
12268                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
12269                 tp->bufmgr_config.mbuf_high_water_jumbo =
12270                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
12271         } else {
12272                 tp->bufmgr_config.mbuf_read_dma_low_water =
12273                         DEFAULT_MB_RDMA_LOW_WATER;
12274                 tp->bufmgr_config.mbuf_mac_rx_low_water =
12275                         DEFAULT_MB_MACRX_LOW_WATER;
12276                 tp->bufmgr_config.mbuf_high_water =
12277                         DEFAULT_MB_HIGH_WATER;
12278
12279                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
12280                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
12281                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
12282                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
12283                 tp->bufmgr_config.mbuf_high_water_jumbo =
12284                         DEFAULT_MB_HIGH_WATER_JUMBO;
12285         }
12286
12287         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
12288         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
12289 }
12290
12291 static char * __devinit tg3_phy_string(struct tg3 *tp)
12292 {
12293         switch (tp->phy_id & PHY_ID_MASK) {
12294         case PHY_ID_BCM5400:    return "5400";
12295         case PHY_ID_BCM5401:    return "5401";
12296         case PHY_ID_BCM5411:    return "5411";
12297         case PHY_ID_BCM5701:    return "5701";
12298         case PHY_ID_BCM5703:    return "5703";
12299         case PHY_ID_BCM5704:    return "5704";
12300         case PHY_ID_BCM5705:    return "5705";
12301         case PHY_ID_BCM5750:    return "5750";
12302         case PHY_ID_BCM5752:    return "5752";
12303         case PHY_ID_BCM5714:    return "5714";
12304         case PHY_ID_BCM5780:    return "5780";
12305         case PHY_ID_BCM5755:    return "5755";
12306         case PHY_ID_BCM5787:    return "5787";
12307         case PHY_ID_BCM5784:    return "5784";
12308         case PHY_ID_BCM5756:    return "5722/5756";
12309         case PHY_ID_BCM5906:    return "5906";
12310         case PHY_ID_BCM5761:    return "5761";
12311         case PHY_ID_BCM8002:    return "8002/serdes";
12312         case 0:                 return "serdes";
12313         default:                return "unknown";
12314         };
12315 }
12316
12317 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
12318 {
12319         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12320                 strcpy(str, "PCI Express");
12321                 return str;
12322         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
12323                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
12324
12325                 strcpy(str, "PCIX:");
12326
12327                 if ((clock_ctrl == 7) ||
12328                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
12329                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
12330                         strcat(str, "133MHz");
12331                 else if (clock_ctrl == 0)
12332                         strcat(str, "33MHz");
12333                 else if (clock_ctrl == 2)
12334                         strcat(str, "50MHz");
12335                 else if (clock_ctrl == 4)
12336                         strcat(str, "66MHz");
12337                 else if (clock_ctrl == 6)
12338                         strcat(str, "100MHz");
12339         } else {
12340                 strcpy(str, "PCI:");
12341                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
12342                         strcat(str, "66MHz");
12343                 else
12344                         strcat(str, "33MHz");
12345         }
12346         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
12347                 strcat(str, ":32-bit");
12348         else
12349                 strcat(str, ":64-bit");
12350         return str;
12351 }
12352
12353 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
12354 {
12355         struct pci_dev *peer;
12356         unsigned int func, devnr = tp->pdev->devfn & ~7;
12357
12358         for (func = 0; func < 8; func++) {
12359                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
12360                 if (peer && peer != tp->pdev)
12361                         break;
12362                 pci_dev_put(peer);
12363         }
12364         /* 5704 can be configured in single-port mode, set peer to
12365          * tp->pdev in that case.
12366          */
12367         if (!peer) {
12368                 peer = tp->pdev;
12369                 return peer;
12370         }
12371
12372         /*
12373          * We don't need to keep the refcount elevated; there's no way
12374          * to remove one half of this device without removing the other
12375          */
12376         pci_dev_put(peer);
12377
12378         return peer;
12379 }
12380
12381 static void __devinit tg3_init_coal(struct tg3 *tp)
12382 {
12383         struct ethtool_coalesce *ec = &tp->coal;
12384
12385         memset(ec, 0, sizeof(*ec));
12386         ec->cmd = ETHTOOL_GCOALESCE;
12387         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
12388         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
12389         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
12390         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
12391         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
12392         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
12393         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
12394         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
12395         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
12396
12397         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
12398                                  HOSTCC_MODE_CLRTICK_TXBD)) {
12399                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
12400                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
12401                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
12402                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
12403         }
12404
12405         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12406                 ec->rx_coalesce_usecs_irq = 0;
12407                 ec->tx_coalesce_usecs_irq = 0;
12408                 ec->stats_block_coalesce_usecs = 0;
12409         }
12410 }
12411
/* PCI probe entry point.  Enables and maps the device, discovers chip
 * capabilities, selects DMA masks, resets stale firmware state, and
 * registers the net_device.  Returns 0 on success or a negative errno;
 * everything acquired so far is released through the goto-cleanup
 * chain at the bottom on failure.
 */
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	static int tg3_version_printed = 0;
	unsigned long tg3reg_base, tg3reg_len;
	struct net_device *dev;
	struct tg3 *tp;
	int err, pm_cap;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	DECLARE_MAC_BUF(mac);

	/* Print the driver banner only for the first device probed. */
	if (tg3_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot enable PCI device, "
		       "aborting.\n");
		return err;
	}

	/* BAR 0 must be a memory-mapped register window. */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find proper PCI device "
		       "base address, aborting.\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
		       "aborting.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability; it is required because the
	 * suspend/resume paths drive the chip's power state through it.
	 */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
		       "aborting.\n");
		err = -EIO;
		goto err_out_free_res;
	}

	tg3reg_base = pci_resource_start(pdev, 0);
	tg3reg_len = pci_resource_len(pdev, 0);

	/* Allocate the net_device with struct tg3 as its private area. */
	dev = alloc_etherdev(sizeof(*tp));
	if (!dev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

#if TG3_VLAN_TAG_USED
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = tg3_vlan_rx_register;
#endif

	/* Initialize the driver-private state with compiled-in defaults. */
	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->mac_mode = TG3_DEF_MAC_MODE;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;
	tp->mi_mode = MAC_MI_MODE_BASE;
	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	/* Map the register window found in BAR 0. */
	tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
	if (!tp->regs) {
		printk(KERN_ERR PFX "Cannot map device registers, "
		       "aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	tg3_init_link_config(tp);

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
	tp->tx_pending = TG3_DEF_TX_RING_PENDING;

	/* Wire up the net_device operations. */
	dev->open = tg3_open;
	dev->stop = tg3_close;
	dev->get_stats = tg3_get_stats;
	dev->set_multicast_list = tg3_set_rx_mode;
	dev->set_mac_address = tg3_set_mac_addr;
	dev->do_ioctl = tg3_ioctl;
	dev->tx_timeout = tg3_tx_timeout;
	netif_napi_add(dev, &tp->napi, tg3_poll, 64);
	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->change_mtu = tg3_change_mtu;
	dev->irq = pdev->irq;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = tg3_poll_controller;
#endif

	/* Probe chip revision, quirks, and feature flags from hardware. */
	err = tg3_get_invariants(tp);
	if (err) {
		printk(KERN_ERR PFX "Problem fetching invariants of chip, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
		persist_dma_mask = dma_mask = DMA_32BIT_MASK;
	else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
		persist_dma_mask = dma_mask = DMA_40BIT_MASK;
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_64BIT_MASK;
#endif
	} else
		persist_dma_mask = dma_mask = DMA_64BIT_MASK;

	/* Configure DMA attributes. */
	if (dma_mask > DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			dev->features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				printk(KERN_ERR PFX "Unable to obtain 64 bit "
				       "DMA for consistent allocations\n");
				goto err_out_iounmap;
			}
		}
	}
	/* Fall back to a 32-bit mask if the wide mask was refused. */
	if (err || dma_mask == DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			printk(KERN_ERR PFX "No usable DMA configuration, "
			       "aborting.\n");
			goto err_out_iounmap;
		}
	}

	tg3_init_bufmgr_config(tp);

	/* Decide TSO capability: hardware TSO chips always get it; some
	 * older/firmware-TSO chips are excluded; the rest are capable
	 * but carry the TSO_BUG workaround flag.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	}
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
	} else {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		dev->features |= NETIF_F_TSO;
		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
			dev->features |= NETIF_F_TSO6;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			dev->features |= NETIF_F_TSO_ECN;
	}


	/* 5705_A1 without TSO on a slow bus limits the RX ring depth. */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/* Chips with the APE management processor expose its registers
	 * via BAR 2; map them and initialize the APE lock state.
	 */
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
		if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
			printk(KERN_ERR PFX "Cannot find proper PCI device "
			       "base address for APE, aborting.\n");
			err = -ENODEV;
			goto err_out_iounmap;
		}

		tg3reg_base = pci_resource_start(pdev, 2);
		tg3reg_len = pci_resource_len(pdev, 2);

		tp->aperegs = ioremap_nocache(tg3reg_base, tg3reg_len);
		/* NOTE(review): pointer compared against the integer 0UL;
		 * "!tp->aperegs" would be the conventional spelling.
		 */
		if (tp->aperegs == 0UL) {
			printk(KERN_ERR PFX "Cannot map APE registers, "
			       "aborting.\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}

		tg3_ape_lock_init(tp);
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shut it down.
	 * The DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
		goto err_out_apeunmap;
	}

	/* Tigon3 can do ipv4 only... and some chips have buggy
	 * checksumming.
	 */
	if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			dev->features |= NETIF_F_IPV6_CSUM;

		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
	} else
		tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;

	/* flow control autonegotiation is default behavior */
	tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
	tp->link_config.flowctrl = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR PFX "Cannot register net device, "
		       "aborting.\n");
		goto err_out_apeunmap;
	}

	/* Announce the device and its discovered capabilities. */
	printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] "
	       "(%s) %s Ethernet %s\n",
	       dev->name,
	       tp->board_part_number,
	       tp->pci_chip_rev_id,
	       tg3_phy_string(tp),
	       tg3_bus_string(tp, str),
	       ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
		((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
		 "10/100/1000Base-T")),
	       print_mac(mac, dev->dev_addr));

	printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
	       "MIirq[%d] ASF[%d] WireSpeed[%d] TSOcap[%d]\n",
	       dev->name,
	       (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
	       (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
	       (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
	       (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
	printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
	       dev->name, tp->dma_rwctrl,
	       (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
		(((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));

	return 0;

	/* Error unwinding: each label releases what was acquired after
	 * the previous label's resource, in reverse order of setup.
	 */
err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
12753
12754 static void __devexit tg3_remove_one(struct pci_dev *pdev)
12755 {
12756         struct net_device *dev = pci_get_drvdata(pdev);
12757
12758         if (dev) {
12759                 struct tg3 *tp = netdev_priv(dev);
12760
12761                 flush_scheduled_work();
12762                 unregister_netdev(dev);
12763                 if (tp->aperegs) {
12764                         iounmap(tp->aperegs);
12765                         tp->aperegs = NULL;
12766                 }
12767                 if (tp->regs) {
12768                         iounmap(tp->regs);
12769                         tp->regs = NULL;
12770                 }
12771                 free_netdev(dev);
12772                 pci_release_regions(pdev);
12773                 pci_disable_device(pdev);
12774                 pci_set_drvdata(pdev, NULL);
12775         }
12776 }
12777
/* Power-management suspend hook.  Quiesces the interface, halts the
 * chip and enters the requested low-power state.  If the power-state
 * change fails, the hardware is restarted so the interface remains
 * usable, and the error is returned.
 */
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);

	if (!netif_running(dev))
		return 0;

	/* Let any queued reset_task finish, then stop NAPI/tx activity
	 * and the periodic timer.
	 */
	flush_scheduled_work();
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	/* Halt the chip; it must be fully reinitialized on resume. */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
	tg3_full_unlock(tp);

	err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
	if (err) {
		/* Could not enter low power: restart the hardware so the
		 * interface stays functional, then report the failure.
		 */
		tg3_full_lock(tp, 0);

		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
		if (tg3_restart_hw(tp, 1))
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);
	}

	return err;
}
12829
12830 static int tg3_resume(struct pci_dev *pdev)
12831 {
12832         struct net_device *dev = pci_get_drvdata(pdev);
12833         struct tg3 *tp = netdev_priv(dev);
12834         int err;
12835
12836         pci_restore_state(tp->pdev);
12837
12838         if (!netif_running(dev))
12839                 return 0;
12840
12841         err = tg3_set_power_state(tp, PCI_D0);
12842         if (err)
12843                 return err;
12844
12845         netif_device_attach(dev);
12846
12847         tg3_full_lock(tp, 0);
12848
12849         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
12850         err = tg3_restart_hw(tp, 1);
12851         if (err)
12852                 goto out;
12853
12854         tp->timer.expires = jiffies + tp->timer_offset;
12855         add_timer(&tp->timer);
12856
12857         tg3_netif_start(tp);
12858
12859 out:
12860         tg3_full_unlock(tp);
12861
12862         return err;
12863 }
12864
/* PCI driver glue: ties the device ID table to the probe/remove and
 * suspend/resume entry points above.
 */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.suspend	= tg3_suspend,
	.resume		= tg3_resume
};
12873
/* Module init: register the PCI driver; per-device setup happens in
 * tg3_init_one() as devices are probed.
 */
static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}

/* Module exit: unregister the driver, detaching all bound devices. */
static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);