/*
 * drivers/net/ethernet/broadcom/tg3.c
 * (commit: "tg3: Adjust BD replenish thresholds")
 */
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2011 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
41 #include <linux/ip.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
47
48 #include <net/checksum.h>
49 #include <net/ip.h>
50
51 #include <asm/system.h>
52 #include <linux/io.h>
53 #include <asm/byteorder.h>
54 #include <linux/uaccess.h>
55
56 #ifdef CONFIG_SPARC
57 #include <asm/idprom.h>
58 #include <asm/prom.h>
59 #endif
60
61 #define BAR_0   0
62 #define BAR_2   2
63
64 #include "tg3.h"
65
66 /* Functions & macros to verify TG3_FLAGS types */
67
68 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
69 {
70         return test_bit(flag, bits);
71 }
72
/* Atomically set @flag in the flag bitmap @bits. */
static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}
77
/* Atomically clear @flag in the flag bitmap @bits. */
static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}
82
83 #define tg3_flag(tp, flag)                              \
84         _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
85 #define tg3_flag_set(tp, flag)                          \
86         _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
87 #define tg3_flag_clear(tp, flag)                        \
88         _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
89
90 #define DRV_MODULE_NAME         "tg3"
91 #define TG3_MAJ_NUM                     3
92 #define TG3_MIN_NUM                     121
93 #define DRV_MODULE_VERSION      \
94         __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
95 #define DRV_MODULE_RELDATE      "November 2, 2011"
96
97 #define RESET_KIND_SHUTDOWN     0
98 #define RESET_KIND_INIT         1
99 #define RESET_KIND_SUSPEND      2
100
101 #define TG3_DEF_RX_MODE         0
102 #define TG3_DEF_TX_MODE         0
103 #define TG3_DEF_MSG_ENABLE        \
104         (NETIF_MSG_DRV          | \
105          NETIF_MSG_PROBE        | \
106          NETIF_MSG_LINK         | \
107          NETIF_MSG_TIMER        | \
108          NETIF_MSG_IFDOWN       | \
109          NETIF_MSG_IFUP         | \
110          NETIF_MSG_RX_ERR       | \
111          NETIF_MSG_TX_ERR)
112
113 #define TG3_GRC_LCLCTL_PWRSW_DELAY      100
114
115 /* length of time before we decide the hardware is borked,
116  * and dev->tx_timeout() should be called to fix the problem
117  */
118
119 #define TG3_TX_TIMEOUT                  (5 * HZ)
120
121 /* hardware minimum and maximum for a single frame's data payload */
122 #define TG3_MIN_MTU                     60
123 #define TG3_MAX_MTU(tp) \
124         (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
125
126 /* These numbers seem to be hard coded in the NIC firmware somehow.
127  * You can't change the ring sizes, but you can change where you place
128  * them in the NIC onboard memory.
129  */
130 #define TG3_RX_STD_RING_SIZE(tp) \
131         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
132          TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
133 #define TG3_DEF_RX_RING_PENDING         200
134 #define TG3_RX_JMB_RING_SIZE(tp) \
135         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
136          TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
137 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
138 #define TG3_RSS_INDIR_TBL_SIZE          128
139
140 /* Do not place this n-ring entries value into the tp struct itself,
141  * we really want to expose these constants to GCC so that modulo et
142  * al.  operations are done with shifts and masks instead of with
143  * hw multiply/modulo instructions.  Another solution would be to
144  * replace things like '% foo' with '& (foo - 1)'.
145  */
146
147 #define TG3_TX_RING_SIZE                512
148 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
149
150 #define TG3_RX_STD_RING_BYTES(tp) \
151         (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
152 #define TG3_RX_JMB_RING_BYTES(tp) \
153         (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
154 #define TG3_RX_RCB_RING_BYTES(tp) \
155         (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
156 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
157                                  TG3_TX_RING_SIZE)
158 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
159
160 #define TG3_DMA_BYTE_ENAB               64
161
162 #define TG3_RX_STD_DMA_SZ               1536
163 #define TG3_RX_JMB_DMA_SZ               9046
164
165 #define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)
166
167 #define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
168 #define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
169
170 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
171         (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
172
173 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
174         (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
175
176 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
177  * that are at least dword aligned when used in PCIX mode.  The driver
178  * works around this bug by double copying the packet.  This workaround
179  * is built into the normal double copy length check for efficiency.
180  *
181  * However, the double copy is only necessary on those architectures
182  * where unaligned memory accesses are inefficient.  For those architectures
183  * where unaligned memory accesses incur little penalty, we can reintegrate
184  * the 5701 in the normal rx path.  Doing so saves a device structure
185  * dereference by hardcoding the double copy threshold in place.
186  */
187 #define TG3_RX_COPY_THRESHOLD           256
188 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
189         #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
190 #else
191         #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
192 #endif
193
194 #if (NET_IP_ALIGN != 0)
195 #define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
196 #else
197 #define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
198 #endif
199
200 /* minimum number of free TX descriptors required to wake up TX process */
201 #define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
202 #define TG3_TX_BD_DMA_MAX               4096
203
204 #define TG3_RAW_IP_ALIGN 2
205
206 #define TG3_FW_UPDATE_TIMEOUT_SEC       5
207
208 #define FIRMWARE_TG3            "tigon/tg3.bin"
209 #define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
210 #define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"
211
/* Driver version banner, printed once at module/probe time. */
static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
214
215 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
216 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
217 MODULE_LICENSE("GPL");
218 MODULE_VERSION(DRV_MODULE_VERSION);
219 MODULE_FIRMWARE(FIRMWARE_TG3);
220 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
221 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
222
223 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
224 module_param(tg3_debug, int, 0);
225 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
226
/* PCI device ID table: every Tigon3 ASIC variant this driver binds to,
 * plus SysKonnect, Altima and Apple OEM IDs and one Fujitsu board.
 * Terminated by the empty sentinel entry.
 */
static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};
311
312 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
313
/* ethtool statistics key strings.  NOTE(review): the order appears to
 * mirror the layout of the hardware statistics block consumed by
 * get_ethtool_stats — confirm against struct tg3_ethtool_stats in
 * tg3.h before reordering.
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};
396
397 #define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
398
399
/* ethtool self-test name strings; indices must match the order in
 * which tg3_self_test fills its results array.
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	{ "nvram test        (online) " },
	{ "link test         (online) " },
	{ "register test     (offline)" },
	{ "memory test       (offline)" },
	{ "mac loopback test (offline)" },
	{ "phy loopback test (offline)" },
	{ "ext loopback test (offline)" },
	{ "interrupt test    (offline)" },
};
412
413 #define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)
414
415
416 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
417 {
418         writel(val, tp->regs + off);
419 }
420
421 static u32 tg3_read32(struct tg3 *tp, u32 off)
422 {
423         return readl(tp->regs + off);
424 }
425
426 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
427 {
428         writel(val, tp->aperegs + off);
429 }
430
431 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
432 {
433         return readl(tp->aperegs + off);
434 }
435
/* Write register @off with @val through the PCI config-space indirect
 * window: the target offset goes into TG3PCI_REG_BASE_ADDR, the data
 * into TG3PCI_REG_DATA.  indirect_lock serializes the two-step
 * sequence against other users of the window.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
445
/* MMIO write followed by a read-back of the same register, forcing
 * the posted write out to the chip before this function returns.
 */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);	/* flush the posted write */
}
451
/* Read register @off through the PCI config-space indirect window.
 * Counterpart of tg3_write_indirect_reg32; indirect_lock serializes
 * the address/data register pair.
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
463
/* Write mailbox register @off via PCI config space (indirect mode).
 * Two hot mailboxes — the RX return-ring consumer index and the
 * standard RX producer index — have dedicated config-space aliases
 * and take a lock-free fast path; all others go through the shared
 * indirect register window.
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	/* Fast path: RX return-ring consumer index alias. */
	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	/* Fast path: standard RX ring producer index alias. */
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	/* NOTE(review): 0x5600 looks like the offset of the mailbox
	 * region within the indirect register space — confirm against
	 * the register map in tg3.h.
	 */
	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
493
/* Read mailbox register @off via the PCI config-space indirect window.
 * Uses the same 0x5600 mailbox-region offset as the write side (see
 * tg3_write_indirect_mbox); indirect_lock serializes the window.
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
505
506 /* usec_wait specifies the wait time in usec when writing to certain registers
507  * where it is unsafe to read back the register without some delay.
508  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
509  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
510  */
/* Register write that honors the chip's posting quirks.  Chips with
 * the PCIX target hwbug or the ICH workaround use the configured
 * non-posted write method; everything else gets a posted write
 * flushed by a read-back.  @usec_wait is applied around the flush so
 * the caller's timing requirement is met on either path.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
529
/* Mailbox write with a flushing read-back.  The read-back is done
 * only when neither MBOX_WRITE_REORDER nor ICH_WORKAROUND is set;
 * on those chips the configured mbox accessors handle ordering
 * themselves.
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}
536
/* Post a TX mailbox value.  Chips with the TXD mailbox hwbug need the
 * value written twice; chips that may reorder mailbox writes need a
 * read-back to flush the write before returning.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);	/* hwbug: repeat the write */
	if (tg3_flag(tp, MBOX_WRITE_REORDER))
		readl(mbox);		/* flush the posted write */
}
546
547 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
548 {
549         return readl(tp->regs + off + GRCMBOX_BASE);
550 }
551
552 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
553 {
554         writel(val, tp->regs + off + GRCMBOX_BASE);
555 }
556
557 #define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
558 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
559 #define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
560 #define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
561 #define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)
562
563 #define tw32(reg, val)                  tp->write32(tp, reg, val)
564 #define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
565 #define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
566 #define tr32(reg)                       tp->read32(tp, reg)
567
/* Write @val into NIC on-chip SRAM at offset @off, via either the PCI
 * config-space memory window (SRAM_USE_CONFIG) or the MMIO memory
 * window registers.  indirect_lock serializes all window users.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	/* NOTE(review): on 5906 the statistics-block SRAM range is
	 * deliberately skipped (writes are silently dropped) —
	 * presumably that region is not accessible this way on 5906.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
592
/* Read NIC on-chip SRAM at offset @off into *@val.  Mirrors
 * tg3_write_mem: config-space window when SRAM_USE_CONFIG is set,
 * MMIO memory window otherwise, serialized by indirect_lock.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	/* 5906: the statistics-block SRAM range cannot be read this
	 * way; report 0 instead (see matching skip in tg3_write_mem).
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
619
/* Release every APE lock the driver (or a previous driver instance)
 * might still hold, so locking starts from a clean slate.  Writing a
 * grant register releases the corresponding lock bit.
 */
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	/* 5761 uses the legacy flat grant registers; later chips have
	 * per-lock grant register banks.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			/* PHY locks are always held via the DRIVER bit. */
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			/* Other locks: PCI function 0 owns the DRIVER
			 * bit, other functions their per-function bit.
			 */
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}

}
649
/* Acquire APE hardware lock @locknum so the driver and the APE
 * firmware do not touch the same resource concurrently.
 *
 * Returns 0 on success (or when no APE is present), -EBUSY if the
 * lock is not granted within ~1ms, -EINVAL for an unknown lock.
 */
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		/* 5761 does not arbitrate the GPIO lock. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return 0;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		/* Function 0 requests with the DRIVER bit; other PCI
		 * functions request with their per-function bit.
		 */
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	default:
		return -EINVAL;
	}

	/* 5761 uses the legacy flat request/grant registers. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}
702
/* Release APE hardware lock @locknum (counterpart of tg3_ape_lock).
 * Writing our request bit to the grant register releases the lock.
 * Silently does nothing when no APE is present or @locknum is unknown.
 */
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		/* 5761 does not arbitrate the GPIO lock. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		/* Same per-function bit selection as tg3_ape_lock. */
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	default:
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
732
/* Post @event to the APE firmware's event mailbox.  Best effort: the
 * function silently returns if the APE is absent, not ready, or still
 * busy with a previous event after ~1ms of polling.
 */
static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int i;
	u32 apedata;

	/* NCSI does not support APE events */
	if (tg3_flag(tp, APE_HAS_NCSI))
		return;

	/* Verify the APE firmware signature is present... */
	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	/* ...and that the firmware reports itself ready. */
	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	for (i = 0; i < 10; i++) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		/* Queue the new event only once the previous one has
		 * been consumed (PENDING bit clear), under the MEM lock.
		 */
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
					event | APE_EVENT_STATUS_EVENT_PENDING);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(100);
	}

	/* Ring the APE doorbell only if the event was actually queued. */
	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}
772
/* Inform the APE firmware of a driver state transition.
 *
 * @kind: RESET_KIND_INIT, RESET_KIND_SHUTDOWN or RESET_KIND_SUSPEND;
 *        any other value is ignored.  No-op when the APE is disabled.
 */
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		/* Publish the host segment signature/length, a bumped
		 * init count and our driver id so the APE sees a live
		 * driver taking over.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				    TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			/* WoL stays armed: let the APE pick link speed. */
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					    TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
829
830 static void tg3_disable_ints(struct tg3 *tp)
831 {
832         int i;
833
834         tw32(TG3PCI_MISC_HOST_CTRL,
835              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
836         for (i = 0; i < tp->irq_max; i++)
837                 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
838 }
839
/* Unmask chip interrupts and re-arm every NAPI interrupt mailbox. */
static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();	/* order the irq_sync clear before unmasking interrupts */

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		/* NOTE(review): the duplicate write presumably re-arms
		 * one-shot MSI mode -- confirm against chip errata.
		 */
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
870
871 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
872 {
873         struct tg3 *tp = tnapi->tp;
874         struct tg3_hw_status *sblk = tnapi->hw_status;
875         unsigned int work_exists = 0;
876
877         /* check for phy events */
878         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
879                 if (sblk->status & SD_STATUS_LINK_CHG)
880                         work_exists = 1;
881         }
882         /* check for RX/TX work to do */
883         if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
884             *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
885                 work_exists = 1;
886
887         return work_exists;
888 }
889
/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	/* Acknowledge processed work and unmask this vector. */
	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();	/* keep the mailbox write ordered before any later MMIO */

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
910
/* Step TG3PCI_CLOCK_CTRL down to the base configuration kept in
 * tp->pci_clock_ctrl.  No-op on CPMU-equipped and 5780-class chips.
 * NOTE(review): the intermediate ALTCLK/44MHZ writes appear to
 * sequence the clock transition in hardware-required stages --
 * confirm against the 57xx programming guide before reordering.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	/* Keep only the CLKRUN bits and the low divider field (0x1f). */
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
943
/* Max polls (10us apart) for an MII access to complete: ~50ms timeout. */
#define PHY_BUSY_LOOPS  5000
/* Read PHY register @reg through the MAC's MII management interface.
 *
 * Autopolling is paused while we own the MDIO interface and restored
 * afterwards.  Busy-waits up to PHY_BUSY_LOOPS polls for completion.
 *
 * Returns 0 with *@val filled on success, -EBUSY on timeout (in which
 * case *@val is left as 0).
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	/* Pause MAC autopolling so it cannot contend for the MDIO bus. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	/* Compose the MI communication frame: PHY addr, reg, read cmd. */
	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			/* Re-read to latch the final data value. */
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	/* Restore autopolling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
994
/* Write @val to PHY register @reg through the MAC's MII management
 * interface.  Mirrors tg3_readphy(): autopolling is paused during the
 * access and restored afterwards.
 *
 * FET-class PHYs do not take MII_CTRL1000/MII_TG3_AUX_CTRL writes;
 * those are silently accepted (return 0) without touching hardware.
 *
 * Returns 0 on success, -EBUSY on completion timeout.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	/* Pause MAC autopolling so it cannot contend for the MDIO bus. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	/* Compose the MI frame: PHY addr, reg, data, write command. */
	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	/* Restore autopolling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
1043
1044 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1045 {
1046         int err;
1047
1048         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1049         if (err)
1050                 goto done;
1051
1052         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1053         if (err)
1054                 goto done;
1055
1056         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1057                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1058         if (err)
1059                 goto done;
1060
1061         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1062
1063 done:
1064         return err;
1065 }
1066
1067 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1068 {
1069         int err;
1070
1071         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1072         if (err)
1073                 goto done;
1074
1075         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1076         if (err)
1077                 goto done;
1078
1079         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1080                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1081         if (err)
1082                 goto done;
1083
1084         err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1085
1086 done:
1087         return err;
1088 }
1089
1090 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1091 {
1092         int err;
1093
1094         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1095         if (!err)
1096                 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1097
1098         return err;
1099 }
1100
1101 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1102 {
1103         int err;
1104
1105         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1106         if (!err)
1107                 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1108
1109         return err;
1110 }
1111
1112 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1113 {
1114         int err;
1115
1116         err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1117                            (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1118                            MII_TG3_AUXCTL_SHDWSEL_MISC);
1119         if (!err)
1120                 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1121
1122         return err;
1123 }
1124
1125 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1126 {
1127         if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1128                 set |= MII_TG3_AUXCTL_MISC_WREN;
1129
1130         return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1131 }
1132
/* Enable PHY shadow-DSP access mode (with 6dB TX coding gain). */
#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

/* Disable shadow-DSP access mode, keeping the 6dB TX coding gain.
 * No trailing semicolon: callers supply their own.  The stray one
 * previously embedded here produced a double semicolon at call sites
 * and broke use inside un-braced if/else bodies.
 */
#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)
1141
/* Soft-reset the PHY by setting BMCR_RESET and polling until the bit
 * self-clears.  Returns 0 on success, -EBUSY on an MDIO access error
 * or if the reset bit does not clear within ~50ms (5000 x 10us).
 */
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	/* limit drops below 0 only when the loop ran to exhaustion. */
	if (limit < 0)
		return -EBUSY;

	return 0;
}
1172
1173 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1174 {
1175         struct tg3 *tp = bp->priv;
1176         u32 val;
1177
1178         spin_lock_bh(&tp->lock);
1179
1180         if (tg3_readphy(tp, reg, &val))
1181                 val = -EIO;
1182
1183         spin_unlock_bh(&tp->lock);
1184
1185         return val;
1186 }
1187
1188 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1189 {
1190         struct tg3 *tp = bp->priv;
1191         u32 ret = 0;
1192
1193         spin_lock_bh(&tp->lock);
1194
1195         if (tg3_writephy(tp, reg, val))
1196                 ret = -EIO;
1197
1198         spin_unlock_bh(&tp->lock);
1199
1200         return ret;
1201 }
1202
/* phylib mii_bus reset callback: the driver resets the PHY itself
 * (see tg3_bmcr_reset()), so there is nothing to do here.
 */
static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
1207
/* Program the 5785 MAC's PHY-interface registers (MAC_PHYCFG1/2 and
 * MAC_EXT_RGMII_MODE) to match the attached PHY model and the RGMII
 * in-band status configuration flags.
 */
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	/* Select the LED mode matching the attached PHY model. */
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		/* Unknown PHY: leave the registers untouched. */
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		/* Non-RGMII attachment: minimal configuration. */
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	/* RGMII: enable in-band status decoding unless disabled. */
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	/* Mirror the in-band options into the external RGMII mode reg. */
	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
1288
/* Turn off MAC autopolling so MDIO accesses are host-driven, then
 * reapply the 5785 PHY interface setup if the mdio bus is already up.
 */
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
1299
/* Determine this device's PHY address and, when phylib is in use,
 * allocate and register an mdio bus wired to tg3_mdio_read/write.
 * Applies PHY-model-specific interface mode and dev_flags settings
 * after probing.  Returns 0 on success or a negative errno.
 */
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		/* 5717+ devices: PHY address derives from the PCI
		 * function number; serdes ports sit 7 addresses higher.
		 */
		tp->phy_addr = tp->pci_fn + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	/* Per-model PHY quirks: interface mode and phylib dev_flags. */
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
1404
1405 static void tg3_mdio_fini(struct tg3 *tp)
1406 {
1407         if (tg3_flag(tp, MDIOBUS_INITED)) {
1408                 tg3_flag_clear(tp, MDIOBUS_INITED);
1409                 mdiobus_unregister(tp->mdio_bus);
1410                 mdiobus_free(tp->mdio_bus);
1411         }
1412 }
1413
/* tp->lock is held.
 * Raise the driver-event bit in the RX CPU event register to signal
 * firmware, and record the time so tg3_wait_for_event_ack() can
 * bound how long it waits for the acknowledgement.
 */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}
1425
/* Upper bound on how long firmware may take to ACK a driver event. */
#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held.
 * Poll until firmware clears GRC_RX_CPU_DRIVER_EVENT (i.e. has
 * consumed the previous event), never waiting longer than the
 * timeout measured from the last event we generated.
 */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;	/* number of 8us polls */

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
1454
/* tp->lock is held.
 * Copy the current PHY link registers (BMCR/BMSR, ADVERTISE/LPA,
 * CTRL1000/STAT1000, PHYADDR) into NIC SRAM and raise a firmware
 * link-update event.  Only active on 5780-class chips running ASF.
 */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	/* Payload length: 14 bytes of register data follow. */
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	/* Each 32-bit word packs two 16-bit MII registers (hi/lo). */
	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}
1501
/* tp->lock is held.
 * Ask ASF firmware to pause (FWCMD_NICDRV_PAUSE_FW) and wait for the
 * acknowledgement.  Skipped when the APE manages firmware instead.
 */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}
1517
/* tp->lock is held.
 * Signal an imminent reset to firmware: write the pre-reset magic to
 * the firmware mailbox, update the driver-state mailbox under the new
 * ASF handshake, and notify the APE for INIT/SUSPEND transitions
 * (SHUTDOWN is signalled by tg3_write_sig_post_reset() instead).
 */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_INIT ||
	    kind == RESET_KIND_SUSPEND)
		tg3_ape_driver_state_change(tp, kind);
}
1550
/* tp->lock is held.
 * Signal reset completion to firmware: record the *_DONE driver state
 * under the new ASF handshake, and notify the APE for SHUTDOWN (the
 * INIT/SUSPEND APE notifications happen pre-reset).
 */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_SHUTDOWN)
		tg3_ape_driver_state_change(tp, kind);
}
1574
1575 /* tp->lock is held. */
1576 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1577 {
1578         if (tg3_flag(tp, ENABLE_ASF)) {
1579                 switch (kind) {
1580                 case RESET_KIND_INIT:
1581                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1582                                       DRV_STATE_START);
1583                         break;
1584
1585                 case RESET_KIND_SHUTDOWN:
1586                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1587                                       DRV_STATE_UNLOAD);
1588                         break;
1589
1590                 case RESET_KIND_SUSPEND:
1591                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1592                                       DRV_STATE_SUSPEND);
1593                         break;
1594
1595                 default:
1596                         break;
1597                 }
1598         }
1599 }
1600
/* Wait for bootcode/firmware to finish initializing after a reset.
 *
 * 5906 (VCPU-based): poll VCPU_STATUS for up to 20ms and return
 * -ENODEV on timeout.  All other chips: poll the firmware mailbox for
 * up to ~1s (100000 x 10us) for the inverted magic value; a timeout
 * there is NOT an error (some Sun onboard parts ship without
 * firmware) but is logged once.  Returns 0 otherwise.
 */
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
1644
/* Log the current link state and forward it to ASF firmware via
 * tg3_ump_link_report().  Speed/duplex/flow-control details are only
 * printed when the link is up and link messages are enabled.
 */
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}
}
1672
1673 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1674 {
1675         u16 miireg;
1676
1677         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1678                 miireg = ADVERTISE_PAUSE_CAP;
1679         else if (flow_ctrl & FLOW_CTRL_TX)
1680                 miireg = ADVERTISE_PAUSE_ASYM;
1681         else if (flow_ctrl & FLOW_CTRL_RX)
1682                 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1683         else
1684                 miireg = 0;
1685
1686         return miireg;
1687 }
1688
1689 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1690 {
1691         u16 miireg;
1692
1693         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1694                 miireg = ADVERTISE_1000XPAUSE;
1695         else if (flow_ctrl & FLOW_CTRL_TX)
1696                 miireg = ADVERTISE_1000XPSE_ASYM;
1697         else if (flow_ctrl & FLOW_CTRL_RX)
1698                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1699         else
1700                 miireg = 0;
1701
1702         return miireg;
1703 }
1704
1705 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1706 {
1707         u8 cap = 0;
1708
1709         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1710                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1711         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1712                 if (lcladv & ADVERTISE_1000XPAUSE)
1713                         cap = FLOW_CTRL_RX;
1714                 if (rmtadv & ADVERTISE_1000XPAUSE)
1715                         cap = FLOW_CTRL_TX;
1716         }
1717
1718         return cap;
1719 }
1720
/* Resolve the active flow control setting and program it into the
 * MAC RX/TX mode registers.
 *
 * @lcladv: local pause advertisement bits (used only in the autoneg
 *          resolution path)
 * @rmtadv: link partner pause advertisement bits
 *
 * NOTE(review): called from tg3_adjust_link() under tp->lock; other
 * call sites are outside this chunk — confirm they hold the lock too.
 */
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
        u8 autoneg;
        u8 flowctrl = 0;
        u32 old_rx_mode = tp->rx_mode;
        u32 old_tx_mode = tp->tx_mode;

        /* With phylib, the autoneg state lives in the phy_device. */
        if (tg3_flag(tp, USE_PHYLIB))
                autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
        else
                autoneg = tp->link_config.autoneg;

        if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
                /* Resolve pause from the advertisement words: serdes
                 * links use the 1000BASE-X code words, copper uses the
                 * generic MII full-duplex resolution helper.
                 */
                if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
                        flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
                else
                        flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
        } else
                flowctrl = tp->link_config.flowctrl;

        tp->link_config.active_flowctrl = flowctrl;

        if (flowctrl & FLOW_CTRL_RX)
                tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
        else
                tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

        /* Only touch the hardware when the setting actually changed. */
        if (old_rx_mode != tp->rx_mode)
                tw32_f(MAC_RX_MODE, tp->rx_mode);

        if (flowctrl & FLOW_CTRL_TX)
                tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
        else
                tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

        if (old_tx_mode != tp->tx_mode)
                tw32_f(MAC_TX_MODE, tp->tx_mode);
}
1759
/* phylib link-change callback (registered via phy_connect() in
 * tg3_phy_init()).  Reconciles the MAC configuration with the
 * current PHY state and reports user-visible link transitions.
 */
static void tg3_adjust_link(struct net_device *dev)
{
        u8 oldflowctrl, linkmesg = 0;
        u32 mac_mode, lcl_adv, rmt_adv;
        struct tg3 *tp = netdev_priv(dev);
        struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

        spin_lock_bh(&tp->lock);

        /* Start from the current MAC mode with the port-mode and
         * half-duplex bits cleared; they are recomputed below.
         */
        mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
                                    MAC_MODE_HALF_DUPLEX);

        oldflowctrl = tp->link_config.active_flowctrl;

        if (phydev->link) {
                lcl_adv = 0;
                rmt_adv = 0;

                /* 10/100 uses the MII port mode, gigabit uses GMII.
                 * Non-5785 parts also default to GMII for any other
                 * speed value.
                 */
                if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
                        mac_mode |= MAC_MODE_PORT_MODE_MII;
                else if (phydev->speed == SPEED_1000 ||
                         GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
                        mac_mode |= MAC_MODE_PORT_MODE_GMII;
                else
                        mac_mode |= MAC_MODE_PORT_MODE_MII;

                if (phydev->duplex == DUPLEX_HALF)
                        mac_mode |= MAC_MODE_HALF_DUPLEX;
                else {
                        /* Full duplex: gather both sides' pause
                         * advertisements so flow control can be
                         * resolved.
                         */
                        lcl_adv = tg3_advert_flowctrl_1000T(
                                  tp->link_config.flowctrl);

                        if (phydev->pause)
                                rmt_adv = LPA_PAUSE_CAP;
                        if (phydev->asym_pause)
                                rmt_adv |= LPA_PAUSE_ASYM;
                }

                tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
        } else
                mac_mode |= MAC_MODE_PORT_MODE_GMII;

        if (mac_mode != tp->mac_mode) {
                tp->mac_mode = mac_mode;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);
        }

        /* 5785: keep the MI status register's 10Mbps-mode bit in sync
         * with the current link speed.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
                if (phydev->speed == SPEED_10)
                        tw32(MAC_MI_STAT,
                             MAC_MI_STAT_10MBPS_MODE |
                             MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
                else
                        tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
        }

        /* Half-duplex gigabit needs an extended slot time (0xff);
         * all other modes use the standard value (32).
         */
        if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
                tw32(MAC_TX_LENGTHS,
                     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
                      (6 << TX_LENGTHS_IPG_SHIFT) |
                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
        else
                tw32(MAC_TX_LENGTHS,
                     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
                      (6 << TX_LENGTHS_IPG_SHIFT) |
                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

        /* Report only when something user-visible changed: link
         * came up or went down, or speed/duplex/flow control differ
         * from the recorded active values.
         */
        if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
            (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
            phydev->speed != tp->link_config.active_speed ||
            phydev->duplex != tp->link_config.active_duplex ||
            oldflowctrl != tp->link_config.active_flowctrl)
                linkmesg = 1;

        tp->link_config.active_speed = phydev->speed;
        tp->link_config.active_duplex = phydev->duplex;

        spin_unlock_bh(&tp->lock);

        /* Log outside the lock; tg3_link_report() only prints and
         * notifies the UMP firmware.
         */
        if (linkmesg)
                tg3_link_report(tp);
}
1843
/* Connect the MAC to its PHY through phylib.
 *
 * Resets the PHY to a known state, attaches tg3_adjust_link() as the
 * link-change callback, and masks the PHY's supported/advertised
 * feature sets down to what the MAC interface mode allows.
 *
 * Returns 0 on success (or if already connected), -EINVAL for an
 * unsupported PHY interface mode, or the phy_connect() error code.
 */
static int tg3_phy_init(struct tg3 *tp)
{
        struct phy_device *phydev;

        /* Idempotent: nothing to do if already connected. */
        if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
                return 0;

        /* Bring the PHY back to a known state. */
        tg3_bmcr_reset(tp);

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

        /* Attach the MAC to the PHY. */
        phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
                             phydev->dev_flags, phydev->interface);
        if (IS_ERR(phydev)) {
                dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
                return PTR_ERR(phydev);
        }

        /* Mask with MAC supported features. */
        switch (phydev->interface) {
        case PHY_INTERFACE_MODE_GMII:
        case PHY_INTERFACE_MODE_RGMII:
                if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
                        phydev->supported &= (PHY_GBIT_FEATURES |
                                              SUPPORTED_Pause |
                                              SUPPORTED_Asym_Pause);
                        break;
                }
                /* 10/100-only devices use the basic feature mask. */
                /* fallthru */
        case PHY_INTERFACE_MODE_MII:
                phydev->supported &= (PHY_BASIC_FEATURES |
                                      SUPPORTED_Pause |
                                      SUPPORTED_Asym_Pause);
                break;
        default:
                /* Unknown interface mode: undo the connect. */
                phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
                return -EINVAL;
        }

        tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

        /* Advertise everything we are capable of. */
        phydev->advertising = phydev->supported;

        return 0;
}
1891
/* (Re)start the PHY state machine.  If the chip is coming out of a
 * low-power state, first restore the link parameters saved when
 * power was dropped, then kick off autonegotiation.
 */
static void tg3_phy_start(struct tg3 *tp)
{
        struct phy_device *phydev;

        if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
                return;

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

        if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
                tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
                /* Restore the settings recorded before low power. */
                phydev->speed = tp->link_config.orig_speed;
                phydev->duplex = tp->link_config.orig_duplex;
                phydev->autoneg = tp->link_config.orig_autoneg;
                phydev->advertising = tp->link_config.orig_advertising;
        }

        phy_start(phydev);

        phy_start_aneg(phydev);
}
1913
1914 static void tg3_phy_stop(struct tg3 *tp)
1915 {
1916         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1917                 return;
1918
1919         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1920 }
1921
1922 static void tg3_phy_fini(struct tg3 *tp)
1923 {
1924         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1925                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1926                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1927         }
1928 }
1929
/* Enable external loopback mode via the PHY's shadow AUXCTL
 * register.  Returns 0 on success or a PHY-access error code.
 */
static int tg3_phy_set_extloopbk(struct tg3 *tp)
{
        int err;
        u32 val;

        /* FET-style PHYs do not have this AUXCTL register. */
        if (tp->phy_flags & TG3_PHYFLG_IS_FET)
                return 0;

        if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
                /* Cannot do read-modify-write on 5401 */
                err = tg3_phy_auxctl_write(tp,
                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
                                           MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
                                           0x4c20);
                goto done;
        }

        err = tg3_phy_auxctl_read(tp,
                                  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
        if (err)
                return err;

        /* Set only the external-loopback bit, preserving the rest. */
        val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
        err = tg3_phy_auxctl_write(tp,
                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);

done:
        return err;
}
1959
1960 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1961 {
1962         u32 phytest;
1963
1964         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1965                 u32 phy;
1966
1967                 tg3_writephy(tp, MII_TG3_FET_TEST,
1968                              phytest | MII_TG3_FET_SHADOW_EN);
1969                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1970                         if (enable)
1971                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1972                         else
1973                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1974                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1975                 }
1976                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1977         }
1978 }
1979
/* Enable or disable PHY auto power-down (APD).
 *
 * Not applicable to pre-5705 chips, nor to 5717+ parts with an MII
 * serdes PHY.  FET PHYs are handled by tg3_phy_fet_toggle_apd();
 * everything else is programmed through the MISC shadow registers.
 */
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
        u32 reg;

        if (!tg3_flag(tp, 5705_PLUS) ||
            (tg3_flag(tp, 5717_PLUS) &&
             (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
                return;

        if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
                tg3_phy_fet_toggle_apd(tp, enable);
                return;
        }

        /* Program the SCR5 power-saving controls. */
        reg = MII_TG3_MISC_SHDW_WREN |
              MII_TG3_MISC_SHDW_SCR5_SEL |
              MII_TG3_MISC_SHDW_SCR5_LPED |
              MII_TG3_MISC_SHDW_SCR5_DLPTLM |
              MII_TG3_MISC_SHDW_SCR5_SDTL |
              MII_TG3_MISC_SHDW_SCR5_C125OE;
        /* DLL APD stays on except when enabling APD on a 5784. */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
                reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

        tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


        /* Program the APD wake timer and enable bit. */
        reg = MII_TG3_MISC_SHDW_WREN |
              MII_TG3_MISC_SHDW_APD_SEL |
              MII_TG3_MISC_SHDW_APD_WKTM_84MS;
        if (enable)
                reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

        tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
2014
/* Enable or disable automatic MDI crossover (auto-MDIX).
 *
 * Only applies to 5705+ copper PHYs.  FET PHYs use a shadow register
 * behind MII_TG3_FET_TEST; other PHYs use the AUXCTL MISC shadow.
 */
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
        u32 phy;

        if (!tg3_flag(tp, 5705_PLUS) ||
            (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
                return;

        if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
                u32 ephy;

                if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
                        u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

                        /* Expose the shadow bank, flip the MDIX bit,
                         * then restore normal register visibility.
                         */
                        tg3_writephy(tp, MII_TG3_FET_TEST,
                                     ephy | MII_TG3_FET_SHADOW_EN);
                        if (!tg3_readphy(tp, reg, &phy)) {
                                if (enable)
                                        phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
                                else
                                        phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
                                tg3_writephy(tp, reg, phy);
                        }
                        tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
                }
        } else {
                int ret;

                /* Read-modify-write the force-auto-MDIX bit. */
                ret = tg3_phy_auxctl_read(tp,
                                          MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
                if (!ret) {
                        if (enable)
                                phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
                        else
                                phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
                        tg3_phy_auxctl_write(tp,
                                             MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
                }
        }
}
2055
2056 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2057 {
2058         int ret;
2059         u32 val;
2060
2061         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2062                 return;
2063
2064         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2065         if (!ret)
2066                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2067                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2068 }
2069
/* Program PHY DSP coefficients from the chip's one-time-programmable
 * (OTP) value.  Each bit-field of tp->phy_otp is extracted and
 * written to its corresponding DSP register.  No-op when there is no
 * OTP value or the SMDSP cannot be opened for writing.
 */
static void tg3_phy_apply_otp(struct tg3 *tp)
{
        u32 otp, phy;

        if (!tp->phy_otp)
                return;

        otp = tp->phy_otp;

        /* A non-zero return means the SMDSP could not be enabled. */
        if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
                return;

        phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
        phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
        tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

        phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
              ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
        tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

        phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
        phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
        tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

        phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
        tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

        phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
        tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

        phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
              ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
        tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

        /* Close the SMDSP access window. */
        TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
}
2106
/* Re-evaluate Energy Efficient Ethernet state after a link change.
 *
 * When an autonegotiated full-duplex 100/1000 link is up on an
 * EEE-capable chip, program the LPI exit timer and, if the link
 * partner resolved EEE too, arm tp->setlpicnt so LPI gets enabled
 * later.  Otherwise clear the DSP TAP26 entry (link up only) and
 * turn off the LPI enable bit in the CPMU.
 */
static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
        u32 val;

        if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
                return;

        tp->setlpicnt = 0;

        if (tp->link_config.autoneg == AUTONEG_ENABLE &&
            current_link_up == 1 &&
            tp->link_config.active_duplex == DUPLEX_FULL &&
            (tp->link_config.active_speed == SPEED_100 ||
             tp->link_config.active_speed == SPEED_1000)) {
                u32 eeectl;

                /* The LPI exit latency depends on link speed. */
                if (tp->link_config.active_speed == SPEED_1000)
                        eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
                else
                        eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

                tw32(TG3_CPMU_EEE_CTRL, eeectl);

                /* Check what EEE mode the link partner resolved to. */
                tg3_phy_cl45_read(tp, MDIO_MMD_AN,
                                  TG3_CL45_D7_EEERES_STAT, &val);

                if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
                    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
                        tp->setlpicnt = 2;
        }

        if (!tp->setlpicnt) {
                /* EEE not in use: clear TAP26 (only possible with the
                 * link up and the SMDSP window open) and disable LPI.
                 */
                if (current_link_up == 1 &&
                   !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
                        tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
                        TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
                }

                val = tr32(TG3_CPMU_EEE_MODE);
                tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
        }
}
2149
/* Enable low-power idle (LPI) in the CPMU, first applying the DSP
 * TAP26 workaround on 5717/5719/57765 gigabit links.
 */
static void tg3_phy_eee_enable(struct tg3 *tp)
{
        u32 val;

        if (tp->link_config.active_speed == SPEED_1000 &&
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
            !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
                val = MII_TG3_DSP_TAP26_ALNOKO |
                      MII_TG3_DSP_TAP26_RMRXSTO;
                tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
                TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
        }

        /* Turn on the LPI enable bit. */
        val = tr32(TG3_CPMU_EEE_MODE);
        tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}
2168
2169 static int tg3_wait_macro_done(struct tg3 *tp)
2170 {
2171         int limit = 100;
2172
2173         while (limit--) {
2174                 u32 tmp32;
2175
2176                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2177                         if ((tmp32 & 0x1000) == 0)
2178                                 break;
2179                 }
2180         }
2181         if (limit < 0)
2182                 return -EBUSY;
2183
2184         return 0;
2185 }
2186
/* Write a test pattern into each of the four DSP channels and read
 * it back to verify the PHY's DSP is operating correctly (part of
 * the 5703/4/5 PHY reset workaround).
 *
 * Sets *resetp and returns -EBUSY when a macro operation times out.
 * On a readback miscompare the DSP is parked in a safe state and
 * -EBUSY is returned without requesting another reset.  Returns 0
 * when all four channels verify.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
        static const u32 test_pat[4][6] = {
        { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
        { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
        { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
        { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
        };
        int chan;

        for (chan = 0; chan < 4; chan++) {
                int i;

                /* Address this channel's test block. */
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

                /* Load the six-word test pattern. */
                for (i = 0; i < 6; i++)
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
                                     test_pat[chan][i]);

                tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                /* Switch to readback mode for the same channel. */
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                /* Read the pattern back as low/high word pairs and
                 * compare against what was written.
                 */
                for (i = 0; i < 6; i += 2) {
                        u32 low, high;

                        if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
                            tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
                            tg3_wait_macro_done(tp)) {
                                *resetp = 1;
                                return -EBUSY;
                        }
                        low &= 0x7fff;
                        high &= 0x000f;
                        if (low != test_pat[chan][i] ||
                            high != test_pat[chan][i+1]) {
                                /* Miscompare: park the DSP; no further
                                 * PHY reset is requested.
                                 */
                                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

                                return -EBUSY;
                        }
                }
        }

        return 0;
}
2252
/* Zero out the test pattern in all four DSP channels, undoing what
 * tg3_phy_write_and_check_testpat() loaded.  Returns -EBUSY if the
 * DSP macro does not complete for any channel, 0 otherwise.
 */
static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
        int chan;

        for (chan = 0; chan < 4; chan++) {
                int i;

                /* Address this channel's test block and write six
                 * zero words.
                 */
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
                for (i = 0; i < 6; i++)
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
                tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
                if (tg3_wait_macro_done(tp))
                        return -EBUSY;
        }

        return 0;
}
2272
/* PHY reset workaround for 5703/5704/5705 chips.
 *
 * Repeatedly resets the PHY and runs the DSP test pattern (up to 10
 * tries) until the pattern verifies, then clears the test state and
 * restores the transmitter/interrupt and master-mode settings that
 * the procedure disturbed.  Returns 0 on success or a negative
 * error code.
 */
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
        u32 reg32, phy9_orig;
        int retries, do_phy_reset, err;

        retries = 10;
        do_phy_reset = 1;
        do {
                /* Re-reset the PHY only when the previous test-pattern
                 * run asked for it (or on the first iteration).
                 */
                if (do_phy_reset) {
                        err = tg3_bmcr_reset(tp);
                        if (err)
                                return err;
                        do_phy_reset = 0;
                }

                /* Disable transmitter and interrupt.  */
                if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
                        continue;

                reg32 |= 0x3000;
                tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

                /* Set full-duplex, 1000 mbps.  */
                tg3_writephy(tp, MII_BMCR,
                             BMCR_FULLDPLX | BMCR_SPEED1000);

                /* Set to master mode.  */
                if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
                        continue;

                tg3_writephy(tp, MII_CTRL1000,
                             CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);

                err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
                if (err)
                        return err;

                /* Block the PHY control access.  */
                tg3_phydsp_write(tp, 0x8005, 0x0800);

                err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
                if (!err)
                        break;
        } while (--retries);

        /* Clear the test pattern regardless of the verify outcome. */
        err = tg3_phy_reset_chanpat(tp);
        if (err)
                return err;

        /* Unblock PHY control access. */
        tg3_phydsp_write(tp, 0x8005, 0x0000);

        tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
        tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);

        TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);

        /* Restore the saved master-mode setting. */
        tg3_writephy(tp, MII_CTRL1000, phy9_orig);

        /* Re-enable transmitter and interrupt. */
        if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
                reg32 &= ~0x3000;
                tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
        } else if (!err)
                err = -EBUSY;

        return err;
}
2339
/* Reset the tigon3 PHY and reapply the chip- and PHY-specific
 * fixups that the reset clears (DSP workarounds, OTP coefficients,
 * APD, jumbo-frame settings, auto-MDIX, wirespeed).
 * Returns 0 on success or a negative error code.
 *
 * NOTE(review): the old comment mentioned a "FORCE argument", but
 * this function takes none — the wording was stale.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
        u32 val, cpmuctrl;
        int err;

        /* 5906: take the embedded PHY out of IDDQ power-down first. */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                val = tr32(GRC_MISC_CFG);
                tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
                udelay(40);
        }
        /* BMSR is read twice because its latched bits need a double
         * read to reflect the current state.
         */
        err  = tg3_readphy(tp, MII_BMSR, &val);
        err |= tg3_readphy(tp, MII_BMSR, &val);
        if (err != 0)
                return -EBUSY;

        /* Resetting the PHY drops the link; report it now. */
        if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
                netif_carrier_off(tp->dev);
                tg3_link_report(tp);
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                err = tg3_phy_reset_5703_4_5(tp);
                if (err)
                        return err;
                goto out;
        }

        /* 5784 (non-AX revisions): temporarily clear the 10MB-RX-only
         * mode around the reset, restoring it afterwards.
         */
        cpmuctrl = 0;
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
            GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
                cpmuctrl = tr32(TG3_CPMU_CTRL);
                if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
                        tw32(TG3_CPMU_CTRL,
                             cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
        }

        err = tg3_bmcr_reset(tp);
        if (err)
                return err;

        if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
                val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
                tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

                tw32(TG3_CPMU_CTRL, cpmuctrl);
        }

        /* 5784-AX/5761-AX: drop the 1000MB MAC clock out of the
         * 12.5MHz setting after the reset.
         */
        if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
            GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
                val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
                if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
                    CPMU_LSPD_1000MB_MACCLK_12_5) {
                        val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
                        udelay(40);
                        tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
                }
        }

        /* 5717+ with MII serdes: none of the copper fixups below
         * apply.
         */
        if (tg3_flag(tp, 5717_PLUS) &&
            (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
                return 0;

        tg3_phy_apply_otp(tp);

        if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
                tg3_phy_toggle_apd(tp, true);
        else
                tg3_phy_toggle_apd(tp, false);

out:
        /* Per-PHY erratum workarounds, written through the DSP. */
        if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
            !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
                tg3_phydsp_write(tp, 0x201f, 0x2aaa);
                tg3_phydsp_write(tp, 0x000a, 0x0323);
                TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
        }

        if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
                tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
                tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
        }

        if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
                if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
                        tg3_phydsp_write(tp, 0x000a, 0x310b);
                        tg3_phydsp_write(tp, 0x201f, 0x9506);
                        tg3_phydsp_write(tp, 0x401f, 0x14e2);
                        TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
                }
        } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
                if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
                        tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
                        if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
                                tg3_writephy(tp, MII_TG3_TEST1,
                                             MII_TG3_TEST1_TRIM_EN | 0x4);
                        } else
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

                        TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
                }
        }

        /* Set Extended packet length bit (bit 14) on all chips that */
        /* support jumbo frames */
        if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
                /* Cannot do read-modify-write on 5401 */
                tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
        } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
                /* Set bit 14 with read-modify-write to preserve other bits */
                err = tg3_phy_auxctl_read(tp,
                                          MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
                if (!err)
                        tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
                                           val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
        }

        /* Set phy register 0x10 bit 0 to high fifo elasticity to support
         * jumbo frames transmission.
         */
        if (tg3_flag(tp, JUMBO_CAPABLE)) {
                if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
                        tg3_writephy(tp, MII_TG3_EXT_CTRL,
                                     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                /* adjust output voltage */
                tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
        }

        tg3_phy_toggle_automdix(tp, 1);
        tg3_phy_set_wirespeed(tp);
        return 0;
}
2480
2481 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2482 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2483 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2484                                           TG3_GPIO_MSG_NEED_VAUX)
2485 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2486         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2487          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2488          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2489          (TG3_GPIO_MSG_DRVR_PRES << 12))
2490
2491 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2492         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2493          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2494          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2495          (TG3_GPIO_MSG_NEED_VAUX << 12))
2496
2497 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2498 {
2499         u32 status, shift;
2500
2501         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2502             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2503                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2504         else
2505                 status = tr32(TG3_CPMU_DRV_STATUS);
2506
2507         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2508         status &= ~(TG3_GPIO_MSG_MASK << shift);
2509         status |= (newstat << shift);
2510
2511         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2512             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2513                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2514         else
2515                 tw32(TG3_CPMU_DRV_STATUS, status);
2516
2517         return status >> TG3_APE_GPIO_MSG_SHIFT;
2518 }
2519
/* Switch the board's power source from auxiliary power to Vmain.
 * No-op for non-NIC (e.g. LOM) configurations.
 *
 * On 5717/5719/5720 the power-switch GPIOs are shared between PCI
 * functions, so the APE GPIO lock is taken and this function's
 * "driver present" bit is published before toggling GRC_LOCAL_CTRL.
 *
 * Returns 0 on success, -EIO if the shared GPIO lock cannot be taken.
 */
static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
			return -EIO;

		/* Tell the other functions this driver is present. */
		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);

		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
	} else {
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	}

	return 0;
}
2544
/* Park the power-switch GPIOs so the device powers down on Vmain
 * (i.e. does not hold onto auxiliary power).
 *
 * Skipped for non-NIC configurations and for 5700/5701.
 *
 * The GPIO1 high/low/high toggle below, with a settle delay after each
 * write, is the sequence the power switch expects — keep the three
 * writes in this exact order.
 */
static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
{
	u32 grc_local_ctrl;

	if (!tg3_flag(tp, IS_NIC) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
		return;

	/* Drive GPIO1 as an output for the whole sequence. */
	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);
}
2568
/* Switch the board's power source to auxiliary power (Vaux) by driving
 * the GRC local-control GPIOs.  The exact GPIO wiring is board- and
 * chip-specific, hence the three distinct sequences below; the write
 * order and the settle delay after each write must be preserved.
 */
static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* 5700/5701: single write asserting GPIO0/1 outputs. */
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    (GRC_LCLCTRL_GPIO_OE0 |
			     GRC_LCLCTRL_GPIO_OE1 |
			     GRC_LCLCTRL_GPIO_OE2 |
			     GRC_LCLCTRL_GPIO_OUTPUT0 |
			     GRC_LCLCTRL_GPIO_OUTPUT1),
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1 |
				     tp->grc_local_ctrl;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else {
		u32 no_gpio2;
		u32 grc_local_ctrl = 0;

		/* Workaround to prevent overdrawing Amps. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}

		/* On 5753 and variants, GPIO2 cannot be used. */
		no_gpio2 = tp->nic_sram_data_cfg &
			   NIC_SRAM_DATA_CFG_NO_GPIO2;

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
				  GRC_LCLCTRL_GPIO_OE1 |
				  GRC_LCLCTRL_GPIO_OE2 |
				  GRC_LCLCTRL_GPIO_OUTPUT1 |
				  GRC_LCLCTRL_GPIO_OUTPUT2;
		if (no_gpio2) {
			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
					    GRC_LCLCTRL_GPIO_OUTPUT2);
		}
		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		/* Then raise GPIO0 in a second step. */
		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		/* Finally drop GPIO2 again, where it is usable. */
		if (!no_gpio2) {
			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL,
				    tp->grc_local_ctrl | grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}
	}
}
2645
2646 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2647 {
2648         u32 msg = 0;
2649
2650         /* Serialize power state transitions */
2651         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2652                 return;
2653
2654         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2655                 msg = TG3_GPIO_MSG_NEED_VAUX;
2656
2657         msg = tg3_set_function_status(tp, msg);
2658
2659         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2660                 goto done;
2661
2662         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2663                 tg3_pwrsrc_switch_to_vaux(tp);
2664         else
2665                 tg3_pwrsrc_die_with_vmain(tp);
2666
2667 done:
2668         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2669 }
2670
/* Decide whether this device — or its peer PCI function, if any —
 * still needs auxiliary power, and switch the power source accordingly.
 * @include_wol: when true, an enabled Wake-on-LAN also counts as a
 * reason to stay on Vaux.
 */
static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
{
	bool need_vaux = false;

	/* The GPIOs do something completely different on 57765. */
	if (!tg3_flag(tp, IS_NIC) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		/* These chips coordinate via a shared APE status word. */
		tg3_frob_aux_power_5717(tp, include_wol ?
					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
		return;
	}

	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);

		/* remove_one() may have been run on the peer. */
		if (dev_peer) {
			struct tg3 *tp_peer = netdev_priv(dev_peer);

			/* If the peer finished init it manages the
			 * power source; leave it alone.
			 */
			if (tg3_flag(tp_peer, INIT_COMPLETE))
				return;

			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
			    tg3_flag(tp_peer, ENABLE_ASF))
				need_vaux = true;
		}
	}

	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
	    tg3_flag(tp, ENABLE_ASF))
		need_vaux = true;

	if (need_vaux)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);
}
2715
2716 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2717 {
2718         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2719                 return 1;
2720         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2721                 if (speed != SPEED_10)
2722                         return 1;
2723         } else if (speed == SPEED_10)
2724                 return 1;
2725
2726         return 0;
2727 }
2728
2729 static int tg3_setup_phy(struct tg3 *, int);
2730 static int tg3_halt_cpu(struct tg3 *, u32);
2731
/* Power down the PHY (or place it in a low-power state) ahead of a
 * device power transition.  Several chip revisions cannot tolerate a
 * full PHY power-down, so this routine takes a number of chip-specific
 * detours and only reaches the final BMCR_PDOWN write for PHYs known
 * to handle it.
 * @do_low_power: also program the copper PHY's low-power/isolate
 *	aux-control settings before powering down.
 */
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		/* SerDes: only 5704 needs any work here. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906: reset the PHY, then put the EPHY in IDDQ mode. */
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;
		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			/* Restart autoneg with nothing advertised. */
			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			/* Enable standby power-down via the shadow
			 * AUXMODE4 register, then restore test mode.
			 */
			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
		      MII_TG3_AUXCTL_PCTL_VREG_11V;
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		/* Drop the 1000Mb MAC clock to 12.5MHz before power-down. */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
2804
/* tp->lock is held.
 *
 * Acquire the NVRAM software arbitration semaphore.  The lock is
 * recursive within the driver: only the first acquisition touches the
 * hardware; nested calls just bump nvram_lock_cnt.  On chips without
 * the NVRAM flag this is a no-op.
 *
 * Returns 0 on success, -ENODEV if the grant never arrives
 * (8000 polls, 20us apart).
 */
static int tg3_nvram_lock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		int i;

		if (tp->nvram_lock_cnt == 0) {
			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
			for (i = 0; i < 8000; i++) {
				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
					break;
				udelay(20);
			}
			if (i == 8000) {
				/* Withdraw the request before giving up. */
				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
				return -ENODEV;
			}
		}
		tp->nvram_lock_cnt++;
	}
	return 0;
}
2827
2828 /* tp->lock is held. */
2829 static void tg3_nvram_unlock(struct tg3 *tp)
2830 {
2831         if (tg3_flag(tp, NVRAM)) {
2832                 if (tp->nvram_lock_cnt > 0)
2833                         tp->nvram_lock_cnt--;
2834                 if (tp->nvram_lock_cnt == 0)
2835                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2836         }
2837 }
2838
2839 /* tp->lock is held. */
2840 static void tg3_enable_nvram_access(struct tg3 *tp)
2841 {
2842         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2843                 u32 nvaccess = tr32(NVRAM_ACCESS);
2844
2845                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2846         }
2847 }
2848
2849 /* tp->lock is held. */
2850 static void tg3_disable_nvram_access(struct tg3 *tp)
2851 {
2852         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2853                 u32 nvaccess = tr32(NVRAM_ACCESS);
2854
2855                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2856         }
2857 }
2858
/* Read one 32-bit word from the EEPROM via the GRC EEPROM engine —
 * the fallback path for devices without the NVRAM interface.
 * @offset must be 4-byte aligned and within EEPROM_ADDR_ADDR_MASK.
 *
 * Returns 0 and stores the word in *@val on success, -EINVAL for a bad
 * offset, -EBUSY if the engine never signals completion (~1s).
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
					u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	/* Preserve unrelated bits, clear address/devid/read fields. */
	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	/* Kick off a read of device 0 at the requested offset. */
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	/* Poll for completion, up to ~1000ms. */
	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}
2898
2899 #define NVRAM_CMD_TIMEOUT 10000
2900
/* Issue @nvram_cmd to the NVRAM engine and poll for completion.
 *
 * Returns 0 once NVRAM_CMD_DONE is observed (with an extra 10us settle
 * delay), -EBUSY after NVRAM_CMD_TIMEOUT polls at 10us intervals.
 */
static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
	int i;

	tw32(NVRAM_CMD, nvram_cmd);
	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
		udelay(10);
		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
			udelay(10);
			break;
		}
	}

	if (i == NVRAM_CMD_TIMEOUT)
		return -EBUSY;

	return 0;
}
2919
2920 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2921 {
2922         if (tg3_flag(tp, NVRAM) &&
2923             tg3_flag(tp, NVRAM_BUFFERED) &&
2924             tg3_flag(tp, FLASH) &&
2925             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2926             (tp->nvram_jedecnum == JEDEC_ATMEL))
2927
2928                 addr = ((addr / tp->nvram_pagesize) <<
2929                         ATMEL_AT45DB0X1B_PAGE_POS) +
2930                        (addr % tp->nvram_pagesize);
2931
2932         return addr;
2933 }
2934
2935 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2936 {
2937         if (tg3_flag(tp, NVRAM) &&
2938             tg3_flag(tp, NVRAM_BUFFERED) &&
2939             tg3_flag(tp, FLASH) &&
2940             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2941             (tp->nvram_jedecnum == JEDEC_ATMEL))
2942
2943                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2944                         tp->nvram_pagesize) +
2945                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2946
2947         return addr;
2948 }
2949
/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 *
 * Read one 32-bit word at @offset into *@val.  Falls back to the
 * EEPROM path on devices without NVRAM.  Takes the NVRAM arbitration
 * lock and enables host access for the duration of the command.
 *
 * Returns 0 on success, -EINVAL for an out-of-range offset, or the
 * error from the lock/command helpers.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!tg3_flag(tp, NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	/* Translate to the flash part's physical addressing first. */
	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
2987
2988 /* Ensures NVRAM data is in bytestream format. */
2989 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2990 {
2991         u32 v;
2992         int res = tg3_nvram_read(tp, offset, &v);
2993         if (!res)
2994                 *val = cpu_to_be32(v);
2995         return res;
2996 }
2997
2998 #define RX_CPU_SCRATCH_BASE     0x30000
2999 #define RX_CPU_SCRATCH_SIZE     0x04000
3000 #define TX_CPU_SCRATCH_BASE     0x34000
3001 #define TX_CPU_SCRATCH_SIZE     0x04000
3002
/* tp->lock is held.
 *
 * Halt the embedded RX or TX CPU.  @offset is RX_CPU_BASE or
 * TX_CPU_BASE; 5705+ chips have no TX CPU, hence the BUG_ON.
 * On 5906 the virtual CPU is halted through GRC_VCPU_EXT_CTRL instead.
 *
 * The halt request is retried up to 10000 times because the CPU may
 * not latch it immediately; the RX CPU additionally gets one final
 * forced halt write after the loop.
 *
 * Returns 0 on success, -ENODEV if the CPU never reports halted.
 */
static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
{
	int i;

	BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (offset == RX_CPU_BASE) {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}

		/* One last forced halt, flushed, with a settle delay. */
		tw32(offset + CPU_STATE, 0xffffffff);
		tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
		udelay(10);
	} else {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}
	}

	if (i >= 10000) {
		netdev_err(tp->dev, "%s timed out, %s CPU\n",
			   __func__, offset == RX_CPU_BASE ? "RX" : "TX");
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tg3_flag(tp, NVRAM))
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}
3047
/* Describes one firmware image to be loaded into an on-chip CPU. */
struct fw_info {
	unsigned int fw_base;	/* load/start address in NIC memory */
	unsigned int fw_len;	/* length of fw_data in bytes */
	const __be32 *fw_data;	/* image words, big-endian as in the blob */
};
3053
/* tp->lock is held.
 *
 * Halt the CPU at @cpu_base, zero its scratch memory
 * (@cpu_scratch_base / @cpu_scratch_size) and copy in the firmware
 * image described by @info.  The caller still has to start the CPU.
 *
 * Returns 0 on success, -EINVAL for an invalid TX-CPU request on
 * 5705+, or the error from tg3_halt_cpu().
 */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
				 u32 cpu_scratch_base, int cpu_scratch_size,
				 struct fw_info *info)
{
	int err, lock_err, i;
	void (*write_op)(struct tg3 *, u32, u32);

	/* 5705+ chips have no TX CPU to load. */
	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: Trying to load TX cpu firmware which is 5705\n",
			   __func__);
		return -EINVAL;
	}

	/* Pick the memory-write primitive appropriate for the chip. */
	if (tg3_flag(tp, 5705_PLUS))
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	/* It is possible that bootcode is still loading at this point.
	 * Get the nvram lock first before halting the cpu.
	 */
	lock_err = tg3_nvram_lock(tp);
	err = tg3_halt_cpu(tp, cpu_base);
	if (!lock_err)
		tg3_nvram_unlock(tp);
	if (err)
		goto out;

	/* Clear the scratch area, keep the CPU halted, then copy the
	 * image word by word to fw_base's offset within the scratch.
	 */
	for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
		write_op(tp, cpu_scratch_base + i, 0);
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
	for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
		write_op(tp, (cpu_scratch_base +
			      (info->fw_base & 0xffff) +
			      (i * sizeof(u32))),
			      be32_to_cpu(info->fw_data[i]));

	err = 0;

out:
	return err;
}
3099
/* tp->lock is held.
 *
 * Load the 5701 A0 workaround firmware (tp->fw) into both the RX and
 * TX CPUs, then start only the RX CPU and verify its program counter
 * took the new start address.
 *
 * Returns 0 on success, a tg3_load_firmware_cpu() error, or -ENODEV
 * if the RX CPU PC cannot be set.
 */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	int err, i;

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	info.fw_base = be32_to_cpu(fw_data[1]);
	info.fw_len = tp->fw->size - 12;	/* skip the 3-word header */
	info.fw_data = &fw_data[3];

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);

	/* Retry setting the PC a few times; the write may not take
	 * while the CPU state settles.
	 */
	for (i = 0; i < 5; i++) {
		if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
			break;
		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
		tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
			   "should be %08x\n", __func__,
			   tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
		return -ENODEV;
	}
	/* Release the CPU from halt. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);

	return 0;
}
3154
/* tp->lock is held.
 *
 * Load the software TSO firmware (tp->fw) and start the CPU that runs
 * it.  Chips with hardware TSO (HW_TSO_1/2/3) need no firmware and
 * return immediately.  On 5705 the image runs on the RX CPU out of
 * the mbuf pool area; otherwise it runs on the TX CPU scratch memory.
 *
 * Returns 0 on success, a tg3_load_firmware_cpu() error, or -ENODEV
 * if the CPU PC cannot be set.
 */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err, i;

	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		return 0;

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	info.fw_base = be32_to_cpu(fw_data[1]);
	cpu_scratch_size = tp->fw_len;
	info.fw_len = tp->fw->size - 12;	/* skip the 3-word header */
	info.fw_data = &fw_data[3];

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
	} else {
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    &info);
	if (err)
		return err;

	/* Now startup the cpu. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, info.fw_base);

	/* Retry setting the PC a few times; the write may not take
	 * while the CPU state settles.
	 */
	for (i = 0; i < 5; i++) {
		if (tr32(cpu_base + CPU_PC) == info.fw_base)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		netdev_err(tp->dev,
			   "%s fails to set CPU PC, is %08x should be %08x\n",
			   __func__, tr32(cpu_base + CPU_PC), info.fw_base);
		return -ENODEV;
	}
	/* Release the CPU from halt. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
	return 0;
}
3218
3219
/* tp->lock is held.
 *
 * Program the device MAC address (tp->dev->dev_addr) into the chip's
 * four MAC address register pairs (and, on 5703/5704, the 12 extended
 * pairs as well), then seed the TX backoff generator from the address
 * bytes.
 * @skip_mac_1: if nonzero, leave address entry 1 untouched —
 *	NOTE(review): presumably entry 1 is owned by management
 *	firmware in that case; not verifiable from this function alone.
 */
static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
{
	u32 addr_high, addr_low;
	int i;

	/* Pack the 6 address bytes into the high:low register format. */
	addr_high = ((tp->dev->dev_addr[0] << 8) |
		     tp->dev->dev_addr[1]);
	addr_low = ((tp->dev->dev_addr[2] << 24) |
		    (tp->dev->dev_addr[3] << 16) |
		    (tp->dev->dev_addr[4] <<  8) |
		    (tp->dev->dev_addr[5] <<  0));
	for (i = 0; i < 4; i++) {
		if (i == 1 && skip_mac_1)
			continue;
		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		for (i = 0; i < 12; i++) {
			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
		}
	}

	/* Seed the transmit backoff generator with the byte sum of the
	 * address so that nodes with different addresses back off
	 * differently.
	 */
	addr_high = (tp->dev->dev_addr[0] +
		     tp->dev->dev_addr[1] +
		     tp->dev->dev_addr[2] +
		     tp->dev->dev_addr[3] +
		     tp->dev->dev_addr[4] +
		     tp->dev->dev_addr[5]) &
		TX_BACKOFF_SEED_MASK;
	tw32(MAC_TX_BACKOFF_SEED, addr_high);
}
3256
/* Restore TG3PCI_MISC_HOST_CTRL from the driver's saved copy so that
 * register accesses (indirect or otherwise) will function correctly —
 * used around power-state transitions before touching any registers.
 */
static void tg3_enable_register_access(struct tg3 *tp)
{
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}
3266
3267 static int tg3_power_up(struct tg3 *tp)
3268 {
3269         int err;
3270
3271         tg3_enable_register_access(tp);
3272
3273         err = pci_set_power_state(tp->pdev, PCI_D0);
3274         if (!err) {
3275                 /* Switch out of Vaux if it is a NIC */
3276                 tg3_pwrsrc_switch_to_vmain(tp);
3277         } else {
3278                 netdev_err(tp->dev, "Transition to D0 failed\n");
3279         }
3280
3281         return err;
3282 }
3283
/* Quiesce the chip ahead of a transition to a low-power state.
 *
 * Masks PCI interrupts, records and reprograms the PHY link settings
 * for low power / Wake-on-LAN, signals the WoL mailbox, configures the
 * MAC for magic-packet reception when wakeup is enabled, gates clocks
 * per ASIC revision, and finally posts the shutdown signature.
 *
 * Always returns 0.
 */
static int tg3_power_down_prepare(struct tg3 *tp)
{
        u32 misc_host_ctrl;
        bool device_should_wake, do_low_power;

        tg3_enable_register_access(tp);

        /* Restore the CLKREQ setting. */
        if (tg3_flag(tp, CLKREQ_BUG)) {
                u16 lnkctl;

                pci_read_config_word(tp->pdev,
                                     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
                                     &lnkctl);
                lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
                pci_write_config_word(tp->pdev,
                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
                                      lnkctl);
        }

        /* Mask PCI interrupts while the device is being shut down. */
        misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
        tw32(TG3PCI_MISC_HOST_CTRL,
             misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

        device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
                             tg3_flag(tp, WOL_ENABLE);

        if (tg3_flag(tp, USE_PHYLIB)) {
                /* phylib-managed PHY: save the current link settings and
                 * restart autonegotiation with a reduced advertisement
                 * suitable for the low-power state.
                 */
                do_low_power = false;
                if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
                    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
                        struct phy_device *phydev;
                        u32 phyid, advertising;

                        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

                        tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

                        /* Remember the pre-suspend settings so they can be
                         * restored on resume.
                         */
                        tp->link_config.orig_speed = phydev->speed;
                        tp->link_config.orig_duplex = phydev->duplex;
                        tp->link_config.orig_autoneg = phydev->autoneg;
                        tp->link_config.orig_advertising = phydev->advertising;

                        advertising = ADVERTISED_TP |
                                      ADVERTISED_Pause |
                                      ADVERTISED_Autoneg |
                                      ADVERTISED_10baseT_Half;

                        if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
                                if (tg3_flag(tp, WOL_SPEED_100MB))
                                        advertising |=
                                                ADVERTISED_100baseT_Half |
                                                ADVERTISED_100baseT_Full |
                                                ADVERTISED_10baseT_Full;
                                else
                                        advertising |= ADVERTISED_10baseT_Full;
                        }

                        phydev->advertising = advertising;

                        phy_start_aneg(phydev);

                        /* Certain Broadcom PHY families still need the
                         * legacy low-power programming below.
                         */
                        phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
                        if (phyid != PHY_ID_BCMAC131) {
                                phyid &= PHY_BCM_OUI_MASK;
                                if (phyid == PHY_BCM_OUI_1 ||
                                    phyid == PHY_BCM_OUI_2 ||
                                    phyid == PHY_BCM_OUI_3)
                                        do_low_power = true;
                        }
                }
        } else {
                /* Driver-managed PHY: always run the low-power sequence. */
                do_low_power = true;

                if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
                        tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
                        tp->link_config.orig_speed = tp->link_config.speed;
                        tp->link_config.orig_duplex = tp->link_config.duplex;
                        tp->link_config.orig_autoneg = tp->link_config.autoneg;
                }

                if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
                        /* Drop copper links to 10/half for minimal power. */
                        tp->link_config.speed = SPEED_10;
                        tp->link_config.duplex = DUPLEX_HALF;
                        tp->link_config.autoneg = AUTONEG_ENABLE;
                        tg3_setup_phy(tp, 0);
                }
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                u32 val;

                val = tr32(GRC_VCPU_EXT_CTRL);
                tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
        } else if (!tg3_flag(tp, ENABLE_ASF)) {
                int i;
                u32 val;

                /* Wait (up to ~200ms) for the firmware mailbox to signal
                 * readiness before proceeding.
                 */
                for (i = 0; i < 200; i++) {
                        tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
                        if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
                                break;
                        msleep(1);
                }
        }
        /* Tell the firmware we are shutting down with WoL armed. */
        if (tg3_flag(tp, WOL_CAP))
                tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
                                                     WOL_DRV_STATE_SHUTDOWN |
                                                     WOL_DRV_WOL |
                                                     WOL_SET_MAGIC_PKT);

        if (device_should_wake) {
                u32 mac_mode;

                /* Configure the MAC port mode so magic packets can still
                 * be received while the host sleeps.
                 */
                if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
                        if (do_low_power &&
                            !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
                                tg3_phy_auxctl_write(tp,
                                               MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
                                               MII_TG3_AUXCTL_PCTL_WOL_EN |
                                               MII_TG3_AUXCTL_PCTL_100TX_LPWR |
                                               MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
                                udelay(40);
                        }

                        if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
                                mac_mode = MAC_MODE_PORT_MODE_GMII;
                        else
                                mac_mode = MAC_MODE_PORT_MODE_MII;

                        mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
                        if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
                            ASIC_REV_5700) {
                                u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
                                             SPEED_100 : SPEED_10;
                                if (tg3_5700_link_polarity(tp, speed))
                                        mac_mode |= MAC_MODE_LINK_POLARITY;
                                else
                                        mac_mode &= ~MAC_MODE_LINK_POLARITY;
                        }
                } else {
                        mac_mode = MAC_MODE_PORT_MODE_TBI;
                }

                if (!tg3_flag(tp, 5750_PLUS))
                        tw32(MAC_LED_CTRL, tp->led_ctrl);

                mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
                if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
                    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
                        mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;

                if (tg3_flag(tp, ENABLE_APE))
                        mac_mode |= MAC_MODE_APE_TX_EN |
                                    MAC_MODE_APE_RX_EN |
                                    MAC_MODE_TDE_ENABLE;

                tw32_f(MAC_MODE, mac_mode);
                udelay(100);

                tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
                udelay(10);
        }

        /* Gate clocks as appropriate for this ASIC generation. */
        if (!tg3_flag(tp, WOL_SPEED_100MB) &&
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
                u32 base_val;

                base_val = tp->pci_clock_ctrl;
                base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
                             CLOCK_CTRL_TXCLK_DISABLE);

                tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
                            CLOCK_CTRL_PWRDOWN_PLL133, 40);
        } else if (tg3_flag(tp, 5780_CLASS) ||
                   tg3_flag(tp, CPMU_PRESENT) ||
                   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                /* do nothing */
        } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
                u32 newbits1, newbits2;

                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                        newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
                                    CLOCK_CTRL_TXCLK_DISABLE |
                                    CLOCK_CTRL_ALTCLK);
                        newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
                } else if (tg3_flag(tp, 5705_PLUS)) {
                        newbits1 = CLOCK_CTRL_625_CORE;
                        newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
                } else {
                        newbits1 = CLOCK_CTRL_ALTCLK;
                        newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
                }

                /* Two-step clock switch, with a settle delay after each. */
                tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
                            40);

                tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
                            40);

                if (!tg3_flag(tp, 5705_PLUS)) {
                        u32 newbits3;

                        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                                newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
                                            CLOCK_CTRL_TXCLK_DISABLE |
                                            CLOCK_CTRL_44MHZ_CORE);
                        } else {
                                newbits3 = CLOCK_CTRL_44MHZ_CORE;
                        }

                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    tp->pci_clock_ctrl | newbits3, 40);
                }
        }

        /* Only power the PHY fully down when nothing needs it awake. */
        if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
                tg3_power_down_phy(tp, do_low_power);

        tg3_frob_aux_power(tp, true);

        /* Workaround for unstable PLL clock */
        if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
            (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
                u32 val = tr32(0x7d00);

                val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
                tw32(0x7d00, val);
                if (!tg3_flag(tp, ENABLE_ASF)) {
                        int err;

                        /* Halt the RX CPU under the NVRAM lock; only unlock
                         * if the lock was actually obtained.
                         */
                        err = tg3_nvram_lock(tp);
                        tg3_halt_cpu(tp, RX_CPU_BASE);
                        if (!err)
                                tg3_nvram_unlock(tp);
                }
        }

        tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

        return 0;
}
3529
3530 static void tg3_power_down(struct tg3 *tp)
3531 {
3532         tg3_power_down_prepare(tp);
3533
3534         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3535         pci_set_power_state(tp->pdev, PCI_D3hot);
3536 }
3537
3538 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3539 {
3540         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3541         case MII_TG3_AUX_STAT_10HALF:
3542                 *speed = SPEED_10;
3543                 *duplex = DUPLEX_HALF;
3544                 break;
3545
3546         case MII_TG3_AUX_STAT_10FULL:
3547                 *speed = SPEED_10;
3548                 *duplex = DUPLEX_FULL;
3549                 break;
3550
3551         case MII_TG3_AUX_STAT_100HALF:
3552                 *speed = SPEED_100;
3553                 *duplex = DUPLEX_HALF;
3554                 break;
3555
3556         case MII_TG3_AUX_STAT_100FULL:
3557                 *speed = SPEED_100;
3558                 *duplex = DUPLEX_FULL;
3559                 break;
3560
3561         case MII_TG3_AUX_STAT_1000HALF:
3562                 *speed = SPEED_1000;
3563                 *duplex = DUPLEX_HALF;
3564                 break;
3565
3566         case MII_TG3_AUX_STAT_1000FULL:
3567                 *speed = SPEED_1000;
3568                 *duplex = DUPLEX_FULL;
3569                 break;
3570
3571         default:
3572                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3573                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3574                                  SPEED_10;
3575                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3576                                   DUPLEX_HALF;
3577                         break;
3578                 }
3579                 *speed = SPEED_INVALID;
3580                 *duplex = DUPLEX_INVALID;
3581                 break;
3582         }
3583 }
3584
/* Program the PHY autonegotiation advertisement registers.
 *
 * @advertise: requested modes in ethtool ADVERTISED_* form.
 * @flowctrl:  requested FLOW_CTRL_TX/RX pause configuration.
 *
 * Writes the 10/100 advertisement (with pause bits), then the 1000T
 * control register on gigabit-capable phys, and finally the EEE
 * advertisement plus per-ASIC DSP fixups on EEE-capable parts.
 *
 * Returns 0 on success or a PHY access error code.
 */
static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
{
        int err = 0;
        u32 val, new_adv;

        new_adv = ADVERTISE_CSMA;
        new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
        new_adv |= tg3_advert_flowctrl_1000T(flowctrl);

        err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
        if (err)
                goto done;

        /* 10/100-only phys have no 1000T control register. */
        if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
                goto done;

        new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);

        /* 5701 A0/B0 require forcing master mode. */
        if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
            tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
                new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;

        err = tg3_writephy(tp, MII_CTRL1000, new_adv);
        if (err)
                goto done;

        if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
                goto done;

        /* Disable LPI before touching the EEE advertisement. */
        tw32(TG3_CPMU_EEE_MODE,
             tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);

        err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
        if (!err) {
                u32 err2;

                val = 0;
                /* Advertise 100-BaseTX EEE ability */
                if (advertise & ADVERTISED_100baseT_Full)
                        val |= MDIO_AN_EEE_ADV_100TX;
                /* Advertise 1000-BaseT EEE ability */
                if (advertise & ADVERTISED_1000baseT_Full)
                        val |= MDIO_AN_EEE_ADV_1000T;
                err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
                if (err)
                        val = 0;

                switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
                case ASIC_REV_5717:
                case ASIC_REV_57765:
                case ASIC_REV_5719:
                        /* If we advertised any eee advertisements above... */
                        if (val)
                                val = MII_TG3_DSP_TAP26_ALNOKO |
                                      MII_TG3_DSP_TAP26_RMRXSTO |
                                      MII_TG3_DSP_TAP26_OPCSINPT;
                        tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
                        /* Fall through */
                case ASIC_REV_5720:
                        if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
                                tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
                                                 MII_TG3_DSP_CH34TP2_HIBW01);
                }

                /* Preserve the first error but always try to disable SMDSP. */
                err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
                if (!err)
                        err = err2;
        }

done:
        return err;
}
3657
/* Begin bringing up a copper PHY link.
 *
 * Chooses the advertisement based on the current power state and the
 * requested link configuration, then either forces a fixed speed/duplex
 * (autoneg disabled) or (re)starts autonegotiation.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
        u32 new_adv;
        int i;

        if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
                /* Low-power state: advertise only the slow modes (plus
                 * 100Mb when WoL needs it) with full flow control.
                 */
                new_adv = ADVERTISED_10baseT_Half |
                          ADVERTISED_10baseT_Full;
                if (tg3_flag(tp, WOL_SPEED_100MB))
                        new_adv |= ADVERTISED_100baseT_Half |
                                   ADVERTISED_100baseT_Full;

                tg3_phy_autoneg_cfg(tp, new_adv,
                                    FLOW_CTRL_TX | FLOW_CTRL_RX);
        } else if (tp->link_config.speed == SPEED_INVALID) {
                /* No forced speed: advertise the configured mode set,
                 * minus gigabit on 10/100-only phys.
                 */
                if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
                        tp->link_config.advertising &=
                                ~(ADVERTISED_1000baseT_Half |
                                  ADVERTISED_1000baseT_Full);

                tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
                                    tp->link_config.flowctrl);
        } else {
                /* Asking for a specific link mode. */
                if (tp->link_config.speed == SPEED_1000) {
                        if (tp->link_config.duplex == DUPLEX_FULL)
                                new_adv = ADVERTISED_1000baseT_Full;
                        else
                                new_adv = ADVERTISED_1000baseT_Half;
                } else if (tp->link_config.speed == SPEED_100) {
                        if (tp->link_config.duplex == DUPLEX_FULL)
                                new_adv = ADVERTISED_100baseT_Full;
                        else
                                new_adv = ADVERTISED_100baseT_Half;
                } else {
                        if (tp->link_config.duplex == DUPLEX_FULL)
                                new_adv = ADVERTISED_10baseT_Full;
                        else
                                new_adv = ADVERTISED_10baseT_Half;
                }

                tg3_phy_autoneg_cfg(tp, new_adv,
                                    tp->link_config.flowctrl);
        }

        if (tp->link_config.autoneg == AUTONEG_DISABLE &&
            tp->link_config.speed != SPEED_INVALID) {
                u32 bmcr, orig_bmcr;

                tp->link_config.active_speed = tp->link_config.speed;
                tp->link_config.active_duplex = tp->link_config.duplex;

                /* Build the forced BMCR value for the requested mode. */
                bmcr = 0;
                switch (tp->link_config.speed) {
                default:
                case SPEED_10:
                        break;

                case SPEED_100:
                        bmcr |= BMCR_SPEED100;
                        break;

                case SPEED_1000:
                        bmcr |= BMCR_SPEED1000;
                        break;
                }

                if (tp->link_config.duplex == DUPLEX_FULL)
                        bmcr |= BMCR_FULLDPLX;

                if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
                    (bmcr != orig_bmcr)) {
                        /* Drop the link via loopback, wait (<=15ms) for
                         * link-down, then apply the new forced mode.
                         */
                        tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
                        for (i = 0; i < 1500; i++) {
                                u32 tmp;

                                udelay(10);
                                if (tg3_readphy(tp, MII_BMSR, &tmp) ||
                                    tg3_readphy(tp, MII_BMSR, &tmp))
                                        continue;
                                if (!(tmp & BMSR_LSTATUS)) {
                                        udelay(40);
                                        break;
                                }
                        }
                        tg3_writephy(tp, MII_BMCR, bmcr);
                        udelay(40);
                }
        } else {
                tg3_writephy(tp, MII_BMCR,
                             BMCR_ANENABLE | BMCR_ANRESTART);
        }
}
3751
3752 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3753 {
3754         int err;
3755
3756         /* Turn off tap power management. */
3757         /* Set Extended packet length bit */
3758         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3759
3760         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3761         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3762         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3763         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3764         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3765
3766         udelay(40);
3767
3768         return err;
3769 }
3770
3771 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3772 {
3773         u32 adv_reg, all_mask = 0;
3774
3775         all_mask = ethtool_adv_to_mii_adv_t(mask) & ADVERTISE_ALL;
3776
3777         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3778                 return 0;
3779
3780         if ((adv_reg & ADVERTISE_ALL) != all_mask)
3781                 return 0;
3782
3783         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3784                 u32 tg3_ctrl;
3785
3786                 all_mask = ethtool_adv_to_mii_ctrl1000_t(mask);
3787
3788                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
3789                         return 0;
3790
3791                 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
3792                 if (tg3_ctrl != all_mask)
3793                         return 0;
3794         }
3795
3796         return 1;
3797 }
3798
3799 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3800 {
3801         u32 curadv, reqadv;
3802
3803         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3804                 return 1;
3805
3806         curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3807         reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3808
3809         if (tp->link_config.active_duplex == DUPLEX_FULL) {
3810                 if (curadv != reqadv)
3811                         return 0;
3812
3813                 if (tg3_flag(tp, PAUSE_AUTONEG))
3814                         tg3_readphy(tp, MII_LPA, rmtadv);
3815         } else {
3816                 /* Reprogram the advertisement register, even if it
3817                  * does not affect the current link.  If the link
3818                  * gets renegotiated in the future, we can save an
3819                  * additional renegotiation cycle by advertising
3820                  * it correctly in the first place.
3821                  */
3822                 if (curadv != reqadv) {
3823                         *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3824                                      ADVERTISE_PAUSE_ASYM);
3825                         tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3826                 }
3827         }
3828
3829         return 1;
3830 }
3831
3832 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3833 {
3834         int current_link_up;
3835         u32 bmsr, val;
3836         u32 lcl_adv, rmt_adv;
3837         u16 current_speed;
3838         u8 current_duplex;
3839         int i, err;
3840
3841         tw32(MAC_EVENT, 0);
3842
3843         tw32_f(MAC_STATUS,
3844              (MAC_STATUS_SYNC_CHANGED |
3845               MAC_STATUS_CFG_CHANGED |
3846               MAC_STATUS_MI_COMPLETION |
3847               MAC_STATUS_LNKSTATE_CHANGED));
3848         udelay(40);
3849
3850         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3851                 tw32_f(MAC_MI_MODE,
3852                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3853                 udelay(80);
3854         }
3855
3856         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3857
3858         /* Some third-party PHYs need to be reset on link going
3859          * down.
3860          */
3861         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3862              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3863              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3864             netif_carrier_ok(tp->dev)) {
3865                 tg3_readphy(tp, MII_BMSR, &bmsr);
3866                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3867                     !(bmsr & BMSR_LSTATUS))
3868                         force_reset = 1;
3869         }
3870         if (force_reset)
3871                 tg3_phy_reset(tp);
3872
3873         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3874                 tg3_readphy(tp, MII_BMSR, &bmsr);
3875                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3876                     !tg3_flag(tp, INIT_COMPLETE))
3877                         bmsr = 0;
3878
3879                 if (!(bmsr & BMSR_LSTATUS)) {
3880                         err = tg3_init_5401phy_dsp(tp);
3881                         if (err)
3882                                 return err;
3883
3884                         tg3_readphy(tp, MII_BMSR, &bmsr);
3885                         for (i = 0; i < 1000; i++) {
3886                                 udelay(10);
3887                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3888                                     (bmsr & BMSR_LSTATUS)) {
3889                                         udelay(40);
3890                                         break;
3891                                 }
3892                         }
3893
3894                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3895                             TG3_PHY_REV_BCM5401_B0 &&
3896                             !(bmsr & BMSR_LSTATUS) &&
3897                             tp->link_config.active_speed == SPEED_1000) {
3898                                 err = tg3_phy_reset(tp);
3899                                 if (!err)
3900                                         err = tg3_init_5401phy_dsp(tp);
3901                                 if (err)
3902                                         return err;
3903                         }
3904                 }
3905         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3906                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3907                 /* 5701 {A0,B0} CRC bug workaround */
3908                 tg3_writephy(tp, 0x15, 0x0a75);
3909                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3910                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3911                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3912         }
3913
3914         /* Clear pending interrupts... */
3915         tg3_readphy(tp, MII_TG3_ISTAT, &val);
3916         tg3_readphy(tp, MII_TG3_ISTAT, &val);
3917
3918         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3919                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3920         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3921                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3922
3923         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3924             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3925                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3926                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
3927                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3928                 else
3929                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3930         }
3931
3932         current_link_up = 0;
3933         current_speed = SPEED_INVALID;
3934         current_duplex = DUPLEX_INVALID;
3935
3936         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3937                 err = tg3_phy_auxctl_read(tp,
3938                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3939                                           &val);
3940                 if (!err && !(val & (1 << 10))) {
3941                         tg3_phy_auxctl_write(tp,
3942                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3943                                              val | (1 << 10));
3944                         goto relink;
3945                 }
3946         }
3947
3948         bmsr = 0;
3949         for (i = 0; i < 100; i++) {
3950                 tg3_readphy(tp, MII_BMSR, &bmsr);
3951                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3952                     (bmsr & BMSR_LSTATUS))
3953                         break;
3954                 udelay(40);
3955         }
3956
3957         if (bmsr & BMSR_LSTATUS) {
3958                 u32 aux_stat, bmcr;
3959
3960                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3961                 for (i = 0; i < 2000; i++) {
3962                         udelay(10);
3963                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3964                             aux_stat)
3965                                 break;
3966                 }
3967
3968                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3969                                              &current_speed,
3970                                              &current_duplex);
3971
3972                 bmcr = 0;
3973                 for (i = 0; i < 200; i++) {
3974                         tg3_readphy(tp, MII_BMCR, &bmcr);
3975                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
3976                                 continue;
3977                         if (bmcr && bmcr != 0x7fff)
3978                                 break;
3979                         udelay(10);
3980                 }
3981
3982                 lcl_adv = 0;
3983                 rmt_adv = 0;
3984
3985                 tp->link_config.active_speed = current_speed;
3986                 tp->link_config.active_duplex = current_duplex;
3987
3988                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3989                         if ((bmcr & BMCR_ANENABLE) &&
3990                             tg3_copper_is_advertising_all(tp,
3991                                                 tp->link_config.advertising)) {
3992                                 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3993                                                                   &rmt_adv))
3994                                         current_link_up = 1;
3995                         }
3996                 } else {
3997                         if (!(bmcr & BMCR_ANENABLE) &&
3998                             tp->link_config.speed == current_speed &&
3999                             tp->link_config.duplex == current_duplex &&
4000                             tp->link_config.flowctrl ==
4001                             tp->link_config.active_flowctrl) {
4002                                 current_link_up = 1;
4003                         }
4004                 }
4005
4006                 if (current_link_up == 1 &&
4007                     tp->link_config.active_duplex == DUPLEX_FULL)
4008                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4009         }
4010
4011 relink:
4012         if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4013                 tg3_phy_copper_begin(tp);
4014
4015                 tg3_readphy(tp, MII_BMSR, &bmsr);
4016                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4017                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4018                         current_link_up = 1;
4019         }
4020
4021         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4022         if (current_link_up == 1) {
4023                 if (tp->link_config.active_speed == SPEED_100 ||
4024                     tp->link_config.active_speed == SPEED_10)
4025                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4026                 else
4027                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4028         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4029                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4030         else
4031                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4032
4033         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4034         if (tp->link_config.active_duplex == DUPLEX_HALF)
4035                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4036
4037         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
4038                 if (current_link_up == 1 &&
4039                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4040                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4041                 else
4042                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4043         }
4044
4045         /* ??? Without this setting Netgear GA302T PHY does not
4046          * ??? send/receive packets...
4047          */
4048         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4049             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4050                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4051                 tw32_f(MAC_MI_MODE, tp->mi_mode);
4052                 udelay(80);
4053         }
4054
4055         tw32_f(MAC_MODE, tp->mac_mode);
4056         udelay(40);
4057
4058         tg3_phy_eee_adjust(tp, current_link_up);
4059
4060         if (tg3_flag(tp, USE_LINKCHG_REG)) {
4061                 /* Polled via timer. */
4062                 tw32_f(MAC_EVENT, 0);
4063         } else {
4064                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4065         }
4066         udelay(40);
4067
4068         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4069             current_link_up == 1 &&
4070             tp->link_config.active_speed == SPEED_1000 &&
4071             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4072                 udelay(120);
4073                 tw32_f(MAC_STATUS,
4074                      (MAC_STATUS_SYNC_CHANGED |
4075                       MAC_STATUS_CFG_CHANGED));
4076                 udelay(40);
4077                 tg3_write_mem(tp,
4078                               NIC_SRAM_FIRMWARE_MBOX,
4079                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4080         }
4081
4082         /* Prevent send BD corruption. */
4083         if (tg3_flag(tp, CLKREQ_BUG)) {
4084                 u16 oldlnkctl, newlnkctl;
4085
4086                 pci_read_config_word(tp->pdev,
4087                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
4088                                      &oldlnkctl);
4089                 if (tp->link_config.active_speed == SPEED_100 ||
4090                     tp->link_config.active_speed == SPEED_10)
4091                         newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
4092                 else
4093                         newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
4094                 if (newlnkctl != oldlnkctl)
4095                         pci_write_config_word(tp->pdev,
4096                                               pci_pcie_cap(tp->pdev) +
4097                                               PCI_EXP_LNKCTL, newlnkctl);
4098         }
4099
4100         if (current_link_up != netif_carrier_ok(tp->dev)) {
4101                 if (current_link_up)
4102                         netif_carrier_on(tp->dev);
4103                 else
4104                         netif_carrier_off(tp->dev);
4105                 tg3_link_report(tp);
4106         }
4107
4108         return 0;
4109 }
4110
/* State tracking for the software 1000BASE-X autonegotiation state
 * machine (tg3_fiber_aneg_smachine), used on fiber/SerDes links when
 * hardware autoneg is not in use.  The MR_* flag names follow the
 * IEEE 802.3 clause 37 management-register terminology.
 */
struct tg3_fiber_aneginfo {
        int state;              /* current ANEG_STATE_* value */
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

        u32 flags;              /* MR_* control and result bits */
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

        /* Timestamps in state-machine ticks (one tick per invocation). */
        unsigned long link_time, cur_time;

        /* Last config word seen, and how many consecutive times the
         * same word was received (ability match requires > 1).
         */
        u32 ability_match_cfg;
        int ability_match_count;

        /* Boolean match results derived from the received config word. */
        char ability_match, idle_match, ack_match;

        /* Raw transmitted/received autoneg config words (ANEG_CFG_*). */
        u32 txconfig, rxconfig;
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
/* Return codes for tg3_fiber_aneg_smachine(). */
#define ANEG_OK         0
#define ANEG_DONE       1
#define ANEG_TIMER_ENAB 2
#define ANEG_FAILED     -1

/* Ticks to wait for a state to settle before advancing. */
#define ANEG_STATE_SETTLE_TIME  10000
4174
/* Advance the software fiber autonegotiation state machine by one tick.
 *
 * @tp: device instance (used for MAC register access)
 * @ap: persistent state machine context
 *
 * Samples the received autoneg config word from the MAC, updates the
 * ability/ack/idle match tracking in @ap, then runs one step of the
 * clause-37-style state machine, programming MAC_TX_AUTO_NEG and
 * MAC_MODE as required.
 *
 * Returns ANEG_OK, ANEG_DONE, ANEG_TIMER_ENAB (caller should keep
 * ticking), or ANEG_FAILED.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
                                   struct tg3_fiber_aneginfo *ap)
{
        u16 flowctrl;
        unsigned long delta;
        u32 rx_cfg_reg;
        int ret;

        /* First invocation: reset all tracking state. */
        if (ap->state == ANEG_STATE_UNKNOWN) {
                ap->rxconfig = 0;
                ap->link_time = 0;
                ap->cur_time = 0;
                ap->ability_match_cfg = 0;
                ap->ability_match_count = 0;
                ap->ability_match = 0;
                ap->idle_match = 0;
                ap->ack_match = 0;
        }
        ap->cur_time++;

        if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
                /* A config word is being received; track how stable it is. */
                rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

                if (rx_cfg_reg != ap->ability_match_cfg) {
                        /* Word changed - restart the stability count. */
                        ap->ability_match_cfg = rx_cfg_reg;
                        ap->ability_match = 0;
                        ap->ability_match_count = 0;
                } else {
                        /* Same word seen more than once -> ability match. */
                        if (++ap->ability_match_count > 1) {
                                ap->ability_match = 1;
                                ap->ability_match_cfg = rx_cfg_reg;
                        }
                }
                if (rx_cfg_reg & ANEG_CFG_ACK)
                        ap->ack_match = 1;
                else
                        ap->ack_match = 0;

                ap->idle_match = 0;
        } else {
                /* No config word: link partner is idle. */
                ap->idle_match = 1;
                ap->ability_match_cfg = 0;
                ap->ability_match_count = 0;
                ap->ability_match = 0;
                ap->ack_match = 0;

                rx_cfg_reg = 0;
        }

        ap->rxconfig = rx_cfg_reg;
        ret = ANEG_OK;

        switch (ap->state) {
        case ANEG_STATE_UNKNOWN:
                if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
                        ap->state = ANEG_STATE_AN_ENABLE;

                /* fallthru */
        case ANEG_STATE_AN_ENABLE:
                ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
                if (ap->flags & MR_AN_ENABLE) {
                        /* Clear all tracking state and restart negotiation. */
                        ap->link_time = 0;
                        ap->cur_time = 0;
                        ap->ability_match_cfg = 0;
                        ap->ability_match_count = 0;
                        ap->ability_match = 0;
                        ap->idle_match = 0;
                        ap->ack_match = 0;

                        ap->state = ANEG_STATE_RESTART_INIT;
                } else {
                        /* Autoneg disabled: report link OK immediately. */
                        ap->state = ANEG_STATE_DISABLE_LINK_OK;
                }
                break;

        case ANEG_STATE_RESTART_INIT:
                /* Start transmitting an all-zero config word. */
                ap->link_time = ap->cur_time;
                ap->flags &= ~(MR_NP_LOADED);
                ap->txconfig = 0;
                tw32(MAC_TX_AUTO_NEG, 0);
                tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ret = ANEG_TIMER_ENAB;
                ap->state = ANEG_STATE_RESTART;

                /* fallthru */
        case ANEG_STATE_RESTART:
                /* Wait out the settle time before probing abilities. */
                delta = ap->cur_time - ap->link_time;
                if (delta > ANEG_STATE_SETTLE_TIME)
                        ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
                else
                        ret = ANEG_TIMER_ENAB;
                break;

        case ANEG_STATE_DISABLE_LINK_OK:
                ret = ANEG_DONE;
                break;

        case ANEG_STATE_ABILITY_DETECT_INIT:
                /* Advertise FD plus our pause capabilities. */
                ap->flags &= ~(MR_TOGGLE_TX);
                ap->txconfig = ANEG_CFG_FD;
                flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
                if (flowctrl & ADVERTISE_1000XPAUSE)
                        ap->txconfig |= ANEG_CFG_PS1;
                if (flowctrl & ADVERTISE_1000XPSE_ASYM)
                        ap->txconfig |= ANEG_CFG_PS2;
                tw32(MAC_TX_AUTO_NEG, ap->txconfig);
                tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ap->state = ANEG_STATE_ABILITY_DETECT;
                break;

        case ANEG_STATE_ABILITY_DETECT:
                /* Wait for a stable, non-zero config word from the peer. */
                if (ap->ability_match != 0 && ap->rxconfig != 0)
                        ap->state = ANEG_STATE_ACK_DETECT_INIT;
                break;

        case ANEG_STATE_ACK_DETECT_INIT:
                /* Acknowledge the peer's config word. */
                ap->txconfig |= ANEG_CFG_ACK;
                tw32(MAC_TX_AUTO_NEG, ap->txconfig);
                tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ap->state = ANEG_STATE_ACK_DETECT;

                /* fallthru */
        case ANEG_STATE_ACK_DETECT:
                if (ap->ack_match != 0) {
                        /* Ack received; proceed only if the acked word
                         * matches the ability we latched earlier.
                         */
                        if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
                            (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
                                ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
                        } else {
                                ap->state = ANEG_STATE_AN_ENABLE;
                        }
                } else if (ap->ability_match != 0 &&
                           ap->rxconfig == 0) {
                        /* Peer went back to idle: restart negotiation. */
                        ap->state = ANEG_STATE_AN_ENABLE;
                }
                break;

        case ANEG_STATE_COMPLETE_ACK_INIT:
                /* Reserved bits set in the config word => failure. */
                if (ap->rxconfig & ANEG_CFG_INVAL) {
                        ret = ANEG_FAILED;
                        break;
                }
                /* Decode the link partner's advertised abilities into
                 * the MR_LP_ADV_* flags.
                 */
                ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
                               MR_LP_ADV_HALF_DUPLEX |
                               MR_LP_ADV_SYM_PAUSE |
                               MR_LP_ADV_ASYM_PAUSE |
                               MR_LP_ADV_REMOTE_FAULT1 |
                               MR_LP_ADV_REMOTE_FAULT2 |
                               MR_LP_ADV_NEXT_PAGE |
                               MR_TOGGLE_RX |
                               MR_NP_RX);
                if (ap->rxconfig & ANEG_CFG_FD)
                        ap->flags |= MR_LP_ADV_FULL_DUPLEX;
                if (ap->rxconfig & ANEG_CFG_HD)
                        ap->flags |= MR_LP_ADV_HALF_DUPLEX;
                if (ap->rxconfig & ANEG_CFG_PS1)
                        ap->flags |= MR_LP_ADV_SYM_PAUSE;
                if (ap->rxconfig & ANEG_CFG_PS2)
                        ap->flags |= MR_LP_ADV_ASYM_PAUSE;
                if (ap->rxconfig & ANEG_CFG_RF1)
                        ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
                if (ap->rxconfig & ANEG_CFG_RF2)
                        ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
                if (ap->rxconfig & ANEG_CFG_NP)
                        ap->flags |= MR_LP_ADV_NEXT_PAGE;

                ap->link_time = ap->cur_time;

                ap->flags ^= (MR_TOGGLE_TX);
                /* NOTE(review): 0x0008 is presumably the received toggle
                 * bit; no named constant exists for it - confirm against
                 * the clause 37 config word layout.
                 */
                if (ap->rxconfig & 0x0008)
                        ap->flags |= MR_TOGGLE_RX;
                if (ap->rxconfig & ANEG_CFG_NP)
                        ap->flags |= MR_NP_RX;
                ap->flags |= MR_PAGE_RX;

                ap->state = ANEG_STATE_COMPLETE_ACK;
                ret = ANEG_TIMER_ENAB;
                break;

        case ANEG_STATE_COMPLETE_ACK:
                if (ap->ability_match != 0 &&
                    ap->rxconfig == 0) {
                        /* Peer returned to idle: restart. */
                        ap->state = ANEG_STATE_AN_ENABLE;
                        break;
                }
                delta = ap->cur_time - ap->link_time;
                if (delta > ANEG_STATE_SETTLE_TIME) {
                        if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
                                ap->state = ANEG_STATE_IDLE_DETECT_INIT;
                        } else {
                                /* Next page requested but not supported
                                 * by us => fail (see NEXT_PAGE_WAIT cases
                                 * below, which are unimplemented).
                                 */
                                if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
                                    !(ap->flags & MR_NP_RX)) {
                                        ap->state = ANEG_STATE_IDLE_DETECT_INIT;
                                } else {
                                        ret = ANEG_FAILED;
                                }
                        }
                }
                break;

        case ANEG_STATE_IDLE_DETECT_INIT:
                /* Stop sending config words and wait for the link to idle. */
                ap->link_time = ap->cur_time;
                tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ap->state = ANEG_STATE_IDLE_DETECT;
                ret = ANEG_TIMER_ENAB;
                break;

        case ANEG_STATE_IDLE_DETECT:
                if (ap->ability_match != 0 &&
                    ap->rxconfig == 0) {
                        ap->state = ANEG_STATE_AN_ENABLE;
                        break;
                }
                delta = ap->cur_time - ap->link_time;
                if (delta > ANEG_STATE_SETTLE_TIME) {
                        /* XXX another gem from the Broadcom driver :( */
                        ap->state = ANEG_STATE_LINK_OK;
                }
                break;

        case ANEG_STATE_LINK_OK:
                ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
                ret = ANEG_DONE;
                break;

        case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
                /* ??? unimplemented */
                break;

        case ANEG_STATE_NEXT_PAGE_WAIT:
                /* ??? unimplemented */
                break;

        default:
                ret = ANEG_FAILED;
                break;
        }

        return ret;
}
4426
4427 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4428 {
4429         int res = 0;
4430         struct tg3_fiber_aneginfo aninfo;
4431         int status = ANEG_FAILED;
4432         unsigned int tick;
4433         u32 tmp;
4434
4435         tw32_f(MAC_TX_AUTO_NEG, 0);
4436
4437         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4438         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4439         udelay(40);
4440
4441         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4442         udelay(40);
4443
4444         memset(&aninfo, 0, sizeof(aninfo));
4445         aninfo.flags |= MR_AN_ENABLE;
4446         aninfo.state = ANEG_STATE_UNKNOWN;
4447         aninfo.cur_time = 0;
4448         tick = 0;
4449         while (++tick < 195000) {
4450                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4451                 if (status == ANEG_DONE || status == ANEG_FAILED)
4452                         break;
4453
4454                 udelay(1);
4455         }
4456
4457         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4458         tw32_f(MAC_MODE, tp->mac_mode);
4459         udelay(40);
4460
4461         *txflags = aninfo.txconfig;
4462         *rxflags = aninfo.flags;
4463
4464         if (status == ANEG_DONE &&
4465             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4466                              MR_LP_ADV_FULL_DUPLEX)))
4467                 res = 1;
4468
4469         return res;
4470 }
4471
4472 static void tg3_init_bcm8002(struct tg3 *tp)
4473 {
4474         u32 mac_status = tr32(MAC_STATUS);
4475         int i;
4476
4477         /* Reset when initting first time or we have a link. */
4478         if (tg3_flag(tp, INIT_COMPLETE) &&
4479             !(mac_status & MAC_STATUS_PCS_SYNCED))
4480                 return;
4481
4482         /* Set PLL lock range. */
4483         tg3_writephy(tp, 0x16, 0x8007);
4484
4485         /* SW reset */
4486         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4487
4488         /* Wait for reset to complete. */
4489         /* XXX schedule_timeout() ... */
4490         for (i = 0; i < 500; i++)
4491                 udelay(10);
4492
4493         /* Config mode; select PMA/Ch 1 regs. */
4494         tg3_writephy(tp, 0x10, 0x8411);
4495
4496         /* Enable auto-lock and comdet, select txclk for tx. */
4497         tg3_writephy(tp, 0x11, 0x0a10);
4498
4499         tg3_writephy(tp, 0x18, 0x00a0);
4500         tg3_writephy(tp, 0x16, 0x41ff);
4501
4502         /* Assert and deassert POR. */
4503         tg3_writephy(tp, 0x13, 0x0400);
4504         udelay(40);
4505         tg3_writephy(tp, 0x13, 0x0000);
4506
4507         tg3_writephy(tp, 0x11, 0x0a50);
4508         udelay(40);
4509         tg3_writephy(tp, 0x11, 0x0a10);
4510
4511         /* Wait for signal to stabilize */
4512         /* XXX schedule_timeout() ... */
4513         for (i = 0; i < 15000; i++)
4514                 udelay(10);
4515
4516         /* Deselect the channel register so we can read the PHYID
4517          * later.
4518          */
4519         tg3_writephy(tp, 0x10, 0x8011);
4520 }
4521
/* Configure the fiber link using the hardware SG-DIG autoneg engine.
 *
 * @tp:         device instance
 * @mac_status: snapshot of the MAC_STATUS register
 *
 * When autoneg is disabled, tears down any active HW autoneg and
 * reports link based solely on PCS sync.  When autoneg is enabled,
 * programs SG_DIG_CTRL with the desired pause advertisement, polls
 * the autoneg result, and falls back to parallel detection (link up
 * with PCS sync but no config words) when negotiation times out.
 *
 * Returns 1 if the link is up, 0 otherwise.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
        u16 flowctrl;
        u32 sg_dig_ctrl, sg_dig_status;
        u32 serdes_cfg, expected_sg_dig_ctrl;
        int workaround, port_a;
        int current_link_up;

        serdes_cfg = 0;
        expected_sg_dig_ctrl = 0;
        workaround = 0;
        port_a = 1;
        current_link_up = 0;

        /* All revs except 5704 A0/A1 need the MAC_SERDES_CFG workaround. */
        if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
            tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
                workaround = 1;
                if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
                        port_a = 0;

                /* preserve bits 0-11,13,14 for signal pre-emphasis */
                /* preserve bits 20-23 for voltage regulator */
                serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
        }

        sg_dig_ctrl = tr32(SG_DIG_CTRL);

        if (tp->link_config.autoneg != AUTONEG_ENABLE) {
                /* Autoneg is off: disable the HW autoneg engine if it
                 * was running, and report link purely from PCS sync.
                 */
                if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
                        if (workaround) {
                                u32 val = serdes_cfg;

                                /* Port-specific SerDes config value. */
                                if (port_a)
                                        val |= 0xc010000;
                                else
                                        val |= 0x4010000;
                                tw32_f(MAC_SERDES_CFG, val);
                        }

                        tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
                }
                if (mac_status & MAC_STATUS_PCS_SYNCED) {
                        tg3_setup_flow_control(tp, 0, 0);
                        current_link_up = 1;
                }
                goto out;
        }

        /* Want auto-negotiation.  */
        expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

        /* Fold our pause advertisement into the expected control value. */
        flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
        if (flowctrl & ADVERTISE_1000XPAUSE)
                expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
        if (flowctrl & ADVERTISE_1000XPSE_ASYM)
                expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

        if (sg_dig_ctrl != expected_sg_dig_ctrl) {
                /* Control register needs reprogramming.  If we are in
                 * parallel-detect mode with PCS sync and no config
                 * words, keep the link up while the counter runs down.
                 */
                if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
                    tp->serdes_counter &&
                    ((mac_status & (MAC_STATUS_PCS_SYNCED |
                                    MAC_STATUS_RCVD_CFG)) ==
                     MAC_STATUS_PCS_SYNCED)) {
                        tp->serdes_counter--;
                        current_link_up = 1;
                        goto out;
                }
restart_autoneg:
                /* Soft-reset the SG-DIG block and start autoneg. */
                if (workaround)
                        tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
                tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
                udelay(5);
                tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

                tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
                tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
        } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
                                 MAC_STATUS_SIGNAL_DET)) {
                sg_dig_status = tr32(SG_DIG_STATUS);
                mac_status = tr32(MAC_STATUS);

                if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
                    (mac_status & MAC_STATUS_PCS_SYNCED)) {
                        /* Autoneg completed: derive flow control from
                         * what we advertised and what the partner sent.
                         */
                        u32 local_adv = 0, remote_adv = 0;

                        if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
                                local_adv |= ADVERTISE_1000XPAUSE;
                        if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
                                local_adv |= ADVERTISE_1000XPSE_ASYM;

                        if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
                                remote_adv |= LPA_1000XPAUSE;
                        if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
                                remote_adv |= LPA_1000XPAUSE_ASYM;

                        tg3_setup_flow_control(tp, local_adv, remote_adv);
                        current_link_up = 1;
                        tp->serdes_counter = 0;
                        tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
                } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
                        if (tp->serdes_counter)
                                tp->serdes_counter--;
                        else {
                                /* Autoneg timed out: disable the HW
                                 * engine and try parallel detection.
                                 */
                                if (workaround) {
                                        u32 val = serdes_cfg;

                                        if (port_a)
                                                val |= 0xc010000;
                                        else
                                                val |= 0x4010000;

                                        tw32_f(MAC_SERDES_CFG, val);
                                }

                                tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
                                udelay(40);

                                /* Link parallel detection - link is up */
                                /* only if we have PCS_SYNC and not */
                                /* receiving config code words */
                                mac_status = tr32(MAC_STATUS);
                                if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
                                    !(mac_status & MAC_STATUS_RCVD_CFG)) {
                                        tg3_setup_flow_control(tp, 0, 0);
                                        current_link_up = 1;
                                        tp->phy_flags |=
                                                TG3_PHYFLG_PARALLEL_DETECT;
                                        tp->serdes_counter =
                                                SERDES_PARALLEL_DET_TIMEOUT;
                                } else
                                        goto restart_autoneg;
                        }
                }
        } else {
                /* No sync and no signal: rearm the autoneg timeout. */
                tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
                tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
        }

out:
        return current_link_up;
}
4663
/* Configure the fiber link using the software autoneg state machine.
 *
 * @tp:         device instance
 * @mac_status: snapshot of the MAC_STATUS register
 *
 * Requires PCS sync.  With autoneg enabled, runs fiber_autoneg() and
 * derives flow control from the exchanged config words; if that fails
 * but the link is synced and quiet (no config words), the link is
 * considered up anyway.  With autoneg disabled, forces a 1000FD link.
 *
 * Returns 1 if the link is up, 0 otherwise.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
        int current_link_up = 0;

        /* No PCS sync - nothing to do. */
        if (!(mac_status & MAC_STATUS_PCS_SYNCED))
                goto out;

        if (tp->link_config.autoneg == AUTONEG_ENABLE) {
                u32 txflags, rxflags;
                int i;

                if (fiber_autoneg(tp, &txflags, &rxflags)) {
                        u32 local_adv = 0, remote_adv = 0;

                        /* Translate tx config / rx flags into the MII
                         * advertisement bits flow control setup expects.
                         */
                        if (txflags & ANEG_CFG_PS1)
                                local_adv |= ADVERTISE_1000XPAUSE;
                        if (txflags & ANEG_CFG_PS2)
                                local_adv |= ADVERTISE_1000XPSE_ASYM;

                        if (rxflags & MR_LP_ADV_SYM_PAUSE)
                                remote_adv |= LPA_1000XPAUSE;
                        if (rxflags & MR_LP_ADV_ASYM_PAUSE)
                                remote_adv |= LPA_1000XPAUSE_ASYM;

                        tg3_setup_flow_control(tp, local_adv, remote_adv);

                        current_link_up = 1;
                }
                /* Clear the sync/config change latches until they stay
                 * clear (up to 30 attempts).
                 */
                for (i = 0; i < 30; i++) {
                        udelay(20);
                        tw32_f(MAC_STATUS,
                               (MAC_STATUS_SYNC_CHANGED |
                                MAC_STATUS_CFG_CHANGED));
                        udelay(40);
                        if ((tr32(MAC_STATUS) &
                             (MAC_STATUS_SYNC_CHANGED |
                              MAC_STATUS_CFG_CHANGED)) == 0)
                                break;
                }

                /* Autoneg failed, but a synced, quiet link still counts. */
                mac_status = tr32(MAC_STATUS);
                if (current_link_up == 0 &&
                    (mac_status & MAC_STATUS_PCS_SYNCED) &&
                    !(mac_status & MAC_STATUS_RCVD_CFG))
                        current_link_up = 1;
        } else {
                tg3_setup_flow_control(tp, 0, 0);

                /* Forcing 1000FD link up. */
                current_link_up = 1;

                /* Pulse SEND_CONFIGS to restart the link partner. */
                tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
                udelay(40);

                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);
        }

out:
        return current_link_up;
}
4725
4726 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4727 {
4728         u32 orig_pause_cfg;
4729         u16 orig_active_speed;
4730         u8 orig_active_duplex;
4731         u32 mac_status;
4732         int current_link_up;
4733         int i;
4734
4735         orig_pause_cfg = tp->link_config.active_flowctrl;
4736         orig_active_speed = tp->link_config.active_speed;
4737         orig_active_duplex = tp->link_config.active_duplex;
4738
4739         if (!tg3_flag(tp, HW_AUTONEG) &&
4740             netif_carrier_ok(tp->dev) &&
4741             tg3_flag(tp, INIT_COMPLETE)) {
4742                 mac_status = tr32(MAC_STATUS);
4743                 mac_status &= (MAC_STATUS_PCS_SYNCED |
4744                                MAC_STATUS_SIGNAL_DET |
4745                                MAC_STATUS_CFG_CHANGED |
4746                                MAC_STATUS_RCVD_CFG);
4747                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4748                                    MAC_STATUS_SIGNAL_DET)) {
4749                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4750                                             MAC_STATUS_CFG_CHANGED));
4751                         return 0;
4752                 }
4753         }
4754
4755         tw32_f(MAC_TX_AUTO_NEG, 0);
4756
4757         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4758         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4759         tw32_f(MAC_MODE, tp->mac_mode);
4760         udelay(40);
4761
4762         if (tp->phy_id == TG3_PHY_ID_BCM8002)
4763                 tg3_init_bcm8002(tp);
4764
4765         /* Enable link change event even when serdes polling.  */
4766         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4767         udelay(40);
4768
4769         current_link_up = 0;
4770         mac_status = tr32(MAC_STATUS);
4771
4772         if (tg3_flag(tp, HW_AUTONEG))
4773                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4774         else
4775                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4776
4777         tp->napi[0].hw_status->status =
4778                 (SD_STATUS_UPDATED |
4779                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4780
4781         for (i = 0; i < 100; i++) {
4782                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4783                                     MAC_STATUS_CFG_CHANGED));
4784                 udelay(5);
4785                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4786                                          MAC_STATUS_CFG_CHANGED |
4787                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4788                         break;
4789         }
4790
4791         mac_status = tr32(MAC_STATUS);
4792         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4793                 current_link_up = 0;
4794                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4795                     tp->serdes_counter == 0) {
4796                         tw32_f(MAC_MODE, (tp->mac_mode |
4797                                           MAC_MODE_SEND_CONFIGS));
4798                         udelay(1);
4799                         tw32_f(MAC_MODE, tp->mac_mode);
4800                 }
4801         }
4802
4803         if (current_link_up == 1) {
4804                 tp->link_config.active_speed = SPEED_1000;
4805                 tp->link_config.active_duplex = DUPLEX_FULL;
4806                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4807                                     LED_CTRL_LNKLED_OVERRIDE |
4808                                     LED_CTRL_1000MBPS_ON));
4809         } else {
4810                 tp->link_config.active_speed = SPEED_INVALID;
4811                 tp->link_config.active_duplex = DUPLEX_INVALID;
4812                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4813                                     LED_CTRL_LNKLED_OVERRIDE |
4814                                     LED_CTRL_TRAFFIC_OVERRIDE));
4815         }
4816
4817         if (current_link_up != netif_carrier_ok(tp->dev)) {
4818                 if (current_link_up)
4819                         netif_carrier_on(tp->dev);
4820                 else
4821                         netif_carrier_off(tp->dev);
4822                 tg3_link_report(tp);
4823         } else {
4824                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4825                 if (orig_pause_cfg != now_pause_cfg ||
4826                     orig_active_speed != tp->link_config.active_speed ||
4827                     orig_active_duplex != tp->link_config.active_duplex)
4828                         tg3_link_report(tp);
4829         }
4830
4831         return 0;
4832 }
4833
/* Link setup for fiber devices whose SERDES is driven through an
 * MII-register-style interface (1000Base-X advertisement words).
 * Programs autonegotiation or forced mode, resolves speed/duplex and
 * flow control from the negotiated result, and updates the carrier
 * state.  Returns the OR-accumulated error status of all tg3_readphy()
 * calls (0 on success).
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;
	u32 local_adv, remote_adv;

	/* Fiber-via-MII still runs the MAC in GMII port mode. */
	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Clear any stale link/config/MI-completion status bits. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	/* BMSR link status is latched; read twice to get current state. */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		/* On 5714, take link state from the MAC TX status
		 * register instead of the PHY's BMSR.
		 */
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, newadv;

		/* Rebuild the 1000Base-X advertisement word from the
		 * requested flow control and advertised modes.
		 */
		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				 ADVERTISE_1000XPAUSE |
				 ADVERTISE_1000XPSE_ASYM |
				 ADVERTISE_SLCT);

		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);

		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
			/* Advertisement changed (or AN was off): restart
			 * autoneg and let the poll timer pick up the
			 * result; return early without touching carrier.
			 */
			tg3_writephy(tp, MII_ADVERTISE, newadv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

			return err;
		}
	} else {
		/* Forced mode: autoneg disabled, duplex from link_config. */
		u32 new_bmcr;

		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				/* Withdraw the advertisement and restart
				 * AN so the peer sees the link drop.
				 */
				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Latched BMSR: read twice for current state. */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		/* Fiber link is always 1000 Mb/s; resolve duplex. */
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			/* Intersect our advertisement with the link
			 * partner's ability to pick the duplex.
			 */
			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;
			} else if (!tg3_flag(tp, 5780_CLASS)) {
				/* Link is up via parallel detect */
			} else {
				current_link_up = 0;
			}
		}
	}

	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	/* Propagate any link state change to the net core. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
5001
/* Periodic helper implementing 1000Base-X parallel detection.  While
 * serdes_counter is nonzero, autoneg is still being given time and we
 * only count down.  After that: if there is no carrier and autoneg is
 * enabled but the PHY sees signal without receiving config code words,
 * force the link up by hand (parallel detect).  Conversely, if a
 * parallel-detected link starts receiving config code words, switch
 * back to autonegotiation.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
					 MII_TG3_DSP_EXP1_INT_STAT);
			/* Latched status: read twice for current value. */
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (netif_carrier_ok(tp->dev) &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				 MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

		}
	}
}
5061
/* Top-level link setup entry point.  Dispatches to the routine matching
 * the attached PHY type (fiber SERDES, fiber-via-MII, or copper), then
 * reprograms the registers whose values depend on the resulting link
 * state: the 5784_AX clock prescaler, MAC TX slot time, statistics
 * coalescing ticks, and the ASPM power-management threshold.  Returns
 * the error code from the PHY setup routine.
 */
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	u32 val;
	int err;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		err = tg3_setup_fiber_phy(tp, force_reset);
	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	else
		err = tg3_setup_copper_phy(tp, force_reset);

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
		u32 scale;

		/* Pick a GRC prescaler matching the current MAC clock. */
		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT);
	/* 5720 keeps its jumbo-frame-length and countdown fields. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	/* 1000/half uses an extended slot time (0xff vs. 32). */
	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS, val |
		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
	else
		tw32(MAC_TX_LENGTHS, val |
		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	if (!tg3_flag(tp, 5705_PLUS)) {
		/* Only coalesce statistics DMA while the link is up. */
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tg3_flag(tp, ASPM_WORKAROUND)) {
		/* Relax the PCIe L1 entry threshold while link is down. */
		val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!netif_carrier_ok(tp->dev))
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
5126
/* Return the driver's irq_sync flag: nonzero while interrupt
 * synchronization is in progress.
 */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
5131
5132 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5133 {
5134         int i;
5135
5136         dst = (u32 *)((u8 *)dst + off);
5137         for (i = 0; i < len; i += sizeof(u32))
5138                 *dst++ = tr32(off + i);
5139 }
5140
/* Dump the register blocks of non-PCIe (legacy) devices into "regs".
 * Each tg3_rd32_loop() call covers one functional block (mailboxes,
 * MAC, send/receive data/BD engines, DMA, CPUs, GRC, NVRAM); blocks
 * that depend on optional features are read conditionally.
 */
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	/* Extra per-vector coalescing registers exist only with MSI-X. */
	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	/* The TX CPU block is absent on 5705 and later devices. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}
5190
/* Emit a diagnostic dump of the chip registers plus the per-vector
 * status blocks and NAPI bookkeeping to the kernel log.  Used on
 * error paths; best-effort (silently degrades if the register dump
 * buffer cannot be allocated).
 */
static void tg3_dump_state(struct tg3 *tp)
{
	int i;
	u32 *regs;

	/* GFP_ATOMIC: safe to call from non-sleeping (error) contexts. */
	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
	if (!regs) {
		netdev_err(tp->dev, "Failed allocating register dump buffer\n");
		return;
	}

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Read up to but not including private PCI registers */
		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
			regs[i / sizeof(u32)] = tr32(i);
	} else
		tg3_dump_legacy_regs(tp, regs);

	/* Print four registers per line, skipping all-zero groups. */
	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])
			continue;

		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   i * 4,
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
	}

	kfree(regs);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* SW status block */
		netdev_err(tp->dev,
			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
			   i,
			   tnapi->hw_status->status,
			   tnapi->hw_status->status_tag,
			   tnapi->hw_status->rx_jumbo_consumer,
			   tnapi->hw_status->rx_consumer,
			   tnapi->hw_status->rx_mini_consumer,
			   tnapi->hw_status->idx[0].rx_producer,
			   tnapi->hw_status->idx[0].tx_consumer);

		netdev_err(tp->dev,
		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
			   i,
			   tnapi->last_tag, tnapi->last_irq_tag,
			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
			   tnapi->rx_rcb_ptr,
			   tnapi->prodring.rx_std_prod_idx,
			   tnapi->prodring.rx_std_cons_idx,
			   tnapi->prodring.rx_jmb_prod_idx,
			   tnapi->prodring.rx_jmb_cons_idx);
	}
}
5248
5249 /* This is called whenever we suspect that the system chipset is re-
5250  * ordering the sequence of MMIO to the tx send mailbox. The symptom
5251  * is bogus tx completions. We try to recover by setting the
5252  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5253  * in the workqueue.
5254  */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* Recovery is only meaningful when the reorder workaround is
	 * not already active and we are not using indirect mailboxes.
	 */
	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	/* Flag the reset for the workqueue; the actual chip reset
	 * happens later outside this context.
	 */
	spin_lock(&tp->lock);
	tg3_flag_set(tp, TX_RECOVERY_PENDING);
	spin_unlock(&tp->lock);
}
5270
5271 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5272 {
5273         /* Tell compiler to fetch tx indices from memory. */
5274         barrier();
5275         return tnapi->tx_pending -
5276                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5277 }
5278
5279 /* Tigon3 never reports partial packet sends.  So we do not
5280  * need special logic to handle SKBs that have not had all
5281  * of their frags sent yet, like SunGEM does.
5282  */
/* Reclaim completed TX descriptors for one NAPI vector: unmap the DMA
 * buffers, free the skbs, advance tx_cons, and wake the TX queue if it
 * was stopped and enough descriptors are now free.  Inconsistencies
 * between hardware and software state trigger tg3_tx_recover().
 */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;

	/* With TSS, vector 0 carries no TX ring, so TX queue numbering
	 * is offset by one relative to the NAPI vector index.
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	while (sw_idx != hw_idx) {
		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A completion for a slot we never filled means the
		 * chipset reordered our mailbox writes.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		/* Skip the extra BDs used when this segment had to be
		 * split across multiple descriptors.
		 */
		while (ri->fragmented) {
			ri->fragmented = false;
			sw_idx = NEXT_TX(sw_idx);
			ri = &tnapi->tx_buffers[sw_idx];
		}

		sw_idx = NEXT_TX(sw_idx);

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			/* A frag slot holding an skb, or running past the
			 * hardware index, indicates corrupted state.
			 */
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       dma_unmap_addr(ri, mapping),
				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
				       PCI_DMA_TODEVICE);

			while (ri->fragmented) {
				ri->fragmented = false;
				sw_idx = NEXT_TX(sw_idx);
				ri = &tnapi->tx_buffers[sw_idx];
			}

			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Re-check under the TX lock so we don't race a concurrent
	 * tg3_start_xmit() stopping the queue again.
	 */
	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
5366
5367 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5368 {
5369         if (!ri->data)
5370                 return;
5371
5372         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5373                          map_sz, PCI_DMA_FROMDEVICE);
5374         kfree(ri->data);
5375         ri->data = NULL;
5376 }
5377
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful asymmetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			    u32 opaque_key, u32 dest_idx_unmasked)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map;
	u8 *data;
	dma_addr_t mapping;
	int skb_size, data_size, dest_idx;

	/* Resolve ring type -> descriptor, bookkeeping slot, and the
	 * DMA-mapped payload size for that ring.
	 */
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		data_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		data_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	/* Allocation covers the DMA window plus room for a
	 * skb_shared_info tail so the buffer can be turned into an skb.
	 */
	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	data = kmalloc(skb_size, GFP_ATOMIC);
	if (!data)
		return -ENOMEM;

	mapping = pci_map_single(tp->pdev,
				 data + TG3_RX_OFFSET(tp),
				 data_size,
				 PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping)) {
		kfree(data);
		return -EIO;
	}

	/* Commit: record the buffer and publish its DMA address in the
	 * descriptor the chip reads.
	 */
	map->data = data;
	dma_unmap_addr_set(map, mapping, mapping);

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return data_size;
}
5446
5447 /* We only need to move over in the address because the other
5448  * members of the RX descriptor are invariant.  See notes above
5449  * tg3_alloc_rx_data for full details.
5450  */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	/* Source is always NAPI vector 0's producer ring set. */
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
		break;

	default:
		/* Unknown ring type: nothing to recycle. */
		return;
	}

	/* Move the buffer pointer, its DMA cookie, and the descriptor
	 * address from the source slot to the destination slot.
	 */
	dest_map->data = src_map->data;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	smp_wmb();

	src_map->data = NULL;
}
5496
5497 /* The RX ring scheme is composed of multiple rings which post fresh
5498  * buffers to the chip, and one special ring the chip uses to report
5499  * status back to the host.
5500  *
5501  * The special ring reports the status of received packets to the
5502  * host.  The chip does not write into the original descriptor the
5503  * RX buffer was obtained from.  The chip simply takes the original
5504  * descriptor as provided by the host, updates the status and length
5505  * field, then writes this into the next status ring entry.
5506  *
5507  * Each ring the host uses to post buffers to the chip is described
5508  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
5509  * it is first placed into the on-chip ram.  When the packet's length
5510  * is known, it walks down the TG3_BDINFO entries to select the ring.
5511  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
5512  * which is within the range of the new packet's length is chosen.
5513  *
5514  * The "separate ring for rx status" scheme may sound queer, but it makes
5515  * sense from a cache coherency perspective.  If only the host writes
5516  * to the buffer post rings, and only the chip writes to the rx status
5517  * rings, then cache lines never move beyond shared-modified state.
5518  * If both the host and chip were to write into the same ring, cache line
5519  * eviction could occur since both entities want it in an exclusive state.
5520  */
/* NAPI RX handler for one vector: drain up to @budget packets from
 * this vector's RX return ring, hand them to the stack via GRO, and
 * replenish the producer ring(s).  Returns the number of packets
 * received.
 */
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
	struct tg3 *tp = tnapi->tp;
	u32 work_mask, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	u16 hw_idx;
	int received;
	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;

	hw_idx = *(tnapi->rx_rcb_prod_idx);
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	std_prod_idx = tpr->rx_std_prod_idx;
	jmb_prod_idx = tpr->rx_jmb_prod_idx;
	while (sw_idx != hw_idx && budget > 0) {
		struct ring_info *ri;
		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;
		u8 *data;

		/* The opaque cookie tells us which producer ring
		 * (std/jumbo) and which slot this buffer came from.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &std_prod_idx;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &jmb_prod_idx;
		} else
			goto next_pkt_nopost;	/* unrecognized ring: skip */

		work_mask |= opaque_key;

		/* Drop errored frames (except the tolerated odd-nibble
		 * MII indication), recycling the buffer in place.
		 */
		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->rx_dropped++;
			goto next_pkt;
		}

		prefetch(data + TG3_RX_OFFSET(tp));
		/* Hardware-reported length includes the FCS; strip it. */
		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;

		if (len > TG3_RX_COPY_THRESH(tp)) {
			/* Large frame: hand the existing DMA buffer to the
			 * stack and post a freshly-allocated replacement.
			 */
			int skb_size;

			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
						    *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr, skb_size,
					 PCI_DMA_FROMDEVICE);

			skb = build_skb(data);
			if (!skb) {
				kfree(data);
				goto drop_it_no_recycle;
			}
			skb_reserve(skb, TG3_RX_OFFSET(tp));
			/* Ensure that the update to the data happens
			 * after the usage of the old DMA mapping.
			 */
			smp_wmb();

			ri->data = NULL;

		} else {
			/* Small frame: copy into a new skb and recycle
			 * the original DMA buffer back onto the ring.
			 */
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);

			skb = netdev_alloc_skb(tp->dev,
					       len + TG3_RAW_IP_ALIGN);
			if (skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(skb, TG3_RAW_IP_ALIGN);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			memcpy(skb->data,
			       data + TG3_RX_OFFSET(tp),
			       len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
		}

		skb_put(skb, len);
		/* Trust hardware checksumming only when RXCSUM is enabled
		 * and the chip reports a fully-correct TCP/UDP checksum.
		 */
		if ((tp->dev->features & NETIF_F_RXCSUM) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		skb->protocol = eth_type_trans(skb, tp->dev);

		/* Drop over-MTU frames unless VLAN-tagged. */
		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q)) {
			dev_kfree_skb(skb);
			goto drop_it_no_recycle;
		}

		if (desc->type_flags & RXD_FLAG_VLAN &&
		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb,
					       desc->err_vlan & RXD_VLAN_MASK);

		napi_gro_receive(&tnapi->napi, skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Periodically push the std producer index mid-loop so
		 * the chip never runs dry during long bursts.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= tp->rx_ret_ring_mask;

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = *(tnapi->rx_rcb_prod_idx);
			rmb();
		}
	}

	/* ACK the status ring. */
	tnapi->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(tnapi->consmbox, sw_idx);

	/* Refill RX ring(s). */
	if (!tg3_flag(tp, ENABLE_RSS)) {
		/* Non-RSS: this vector owns the producer rings, so the
		 * mailbox writes can be issued directly.
		 */
		if (work_mask & RXD_OPAQUE_RING_STD) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
		}
		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
			tpr->rx_jmb_prod_idx = jmb_prod_idx &
					       tp->rx_jmb_ring_mask;
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     tpr->rx_jmb_prod_idx);
		}
		mmiowb();
	} else if (work_mask) {
		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
		 * updated before the producer indices can be updated.
		 */
		smp_wmb();

		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;

		/* RSS: napi[1] consolidates all vectors' rings; kick it
		 * unless we are that vector ourselves.
		 */
		if (tnapi != &tp->napi[1])
			napi_schedule(&tp->napi[1].napi);
	}

	return received;
}
5707
5708 static void tg3_poll_link(struct tg3 *tp)
5709 {
5710         /* handle link change and other phy events */
5711         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5712                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5713
5714                 if (sblk->status & SD_STATUS_LINK_CHG) {
5715                         sblk->status = SD_STATUS_UPDATED |
5716                                        (sblk->status & ~SD_STATUS_LINK_CHG);
5717                         spin_lock(&tp->lock);
5718                         if (tg3_flag(tp, USE_PHYLIB)) {
5719                                 tw32_f(MAC_STATUS,
5720                                      (MAC_STATUS_SYNC_CHANGED |
5721                                       MAC_STATUS_CFG_CHANGED |
5722                                       MAC_STATUS_MI_COMPLETION |
5723                                       MAC_STATUS_LNKSTATE_CHANGED));
5724                                 udelay(40);
5725                         } else
5726                                 tg3_setup_phy(tp, 0);
5727                         spin_unlock(&tp->lock);
5728                 }
5729         }
5730 }
5731
/* Transfer recycled RX buffers from a per-vector producer ring @spr
 * into the master producer ring @dpr (napi[0]'s), used in RSS mode
 * where only one set of rings is actually posted to the hardware.
 *
 * Returns 0 on a complete transfer, or -ENOSPC if some destination
 * slots were still occupied (the transfer is then partial).
 */
static int tg3_rx_prodring_xfer(struct tg3 *tp,
				struct tg3_rx_prodring_set *dpr,
				struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i, err = 0;

	/* First drain the standard-buffer ring. */
	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		/* Contiguous entries available, accounting for wraparound. */
		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = tp->rx_std_ring_mask + 1 -
				 spr->rx_std_cons_idx;

		/* Clamp to the contiguous space left in the destination. */
		cpycnt = min(cpycnt,
			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

		/* Shrink the copy if a destination slot is still occupied. */
		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		/* Copy the DMA addresses into the destination BDs. */
		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
				       tp->rx_std_ring_mask;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
				       tp->rx_std_ring_mask;
	}

	/* Then drain the jumbo-buffer ring the same way. */
	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = tp->rx_jmb_ring_mask + 1 -
				 spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
	}

	return err;
}
5857
/* Common NAPI work routine: run TX completion first, then RX within
 * the remaining budget.  In RSS mode, vector 1 additionally gathers
 * the other vectors' recycled buffers into napi[0]'s producer rings
 * and posts them to the chip.  Returns the updated work_done count.
 */
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			return work_done;
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
		int i, err = 0;
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		/* Pull buffers recycled by the other vectors into the
		 * master producer rings.
		 */
		for (i = 1; i < tp->irq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);

		/* Order ring updates before the mailbox writes below. */
		wmb();

		/* Only touch a mailbox if its index actually moved. */
		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		mmiowb();

		/* A partial transfer leaves buffers stranded; force a
		 * coalescing tick so the transfer is retried soon.
		 */
		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}
5904
5905 static inline void tg3_reset_task_schedule(struct tg3 *tp)
5906 {
5907         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
5908                 schedule_work(&tp->reset_task);
5909 }
5910
5911 static inline void tg3_reset_task_cancel(struct tg3 *tp)
5912 {
5913         cancel_work_sync(&tp->reset_task);
5914         tg3_flag_clear(tp, RESET_TASK_PENDING);
5915 }
5916
/* NAPI poll handler for MSI-X vectors other than vector 0.  Unlike
 * tg3_poll() it never handles link or error events and always uses
 * tagged status-block acknowledgement.
 */
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		/* tp->last_tag is used in tg3_int_reenable() below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
			napi_complete(napi);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
			mmiowb();
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
5960
5961 static void tg3_process_error(struct tg3 *tp)
5962 {
5963         u32 val;
5964         bool real_error = false;
5965
5966         if (tg3_flag(tp, ERROR_PROCESSED))
5967                 return;
5968
5969         /* Check Flow Attention register */
5970         val = tr32(HOSTCC_FLOW_ATTN);
5971         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5972                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
5973                 real_error = true;
5974         }
5975
5976         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5977                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
5978                 real_error = true;
5979         }
5980
5981         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5982                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
5983                 real_error = true;
5984         }
5985
5986         if (!real_error)
5987                 return;
5988
5989         tg3_dump_state(tp);
5990
5991         tg3_flag_set(tp, ERROR_PROCESSED);
5992         tg3_reset_task_schedule(tp);
5993 }
5994
/* NAPI poll handler for vector 0 (the only vector in INTx/MSI modes).
 * In addition to TX/RX work it processes chip error attentions and
 * link-change events, and supports both tagged and non-tagged status
 * block acknowledgement.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		/* Handle chip-reported errors before normal work. */
		if (sblk->status & SD_STATUS_ERROR)
			tg3_process_error(tp);

		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tp->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete(napi);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
6042
6043 static void tg3_napi_disable(struct tg3 *tp)
6044 {
6045         int i;
6046
6047         for (i = tp->irq_cnt - 1; i >= 0; i--)
6048                 napi_disable(&tp->napi[i].napi);
6049 }
6050
6051 static void tg3_napi_enable(struct tg3 *tp)
6052 {
6053         int i;
6054
6055         for (i = 0; i < tp->irq_cnt; i++)
6056                 napi_enable(&tp->napi[i].napi);
6057 }
6058
6059 static void tg3_napi_init(struct tg3 *tp)
6060 {
6061         int i;
6062
6063         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6064         for (i = 1; i < tp->irq_cnt; i++)
6065                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6066 }
6067
6068 static void tg3_napi_fini(struct tg3 *tp)
6069 {
6070         int i;
6071
6072         for (i = 0; i < tp->irq_cnt; i++)
6073                 netif_napi_del(&tp->napi[i].napi);
6074 }
6075
6076 static inline void tg3_netif_stop(struct tg3 *tp)
6077 {
6078         tp->dev->trans_start = jiffies; /* prevent tx timeout */
6079         tg3_napi_disable(tp);
6080         netif_tx_disable(tp->dev);
6081 }
6082
6083 static inline void tg3_netif_start(struct tg3 *tp)
6084 {
6085         /* NOTE: unconditional netif_tx_wake_all_queues is only
6086          * appropriate so long as all callers are assured to
6087          * have free tx slots (such as after tg3_init_hw)
6088          */
6089         netif_tx_wake_all_queues(tp->dev);
6090
6091         tg3_napi_enable(tp);
6092         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6093         tg3_enable_ints(tp);
6094 }
6095
6096 static void tg3_irq_quiesce(struct tg3 *tp)
6097 {
6098         int i;
6099
6100         BUG_ON(tp->irq_sync);
6101
6102         tp->irq_sync = 1;
6103         smp_mb();
6104
6105         for (i = 0; i < tp->irq_cnt; i++)
6106                 synchronize_irq(tp->napi[i].irq_vec);
6107 }
6108
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);	/* must follow taking the lock */
}
6120
/* Counterpart to tg3_full_lock(): release tp->lock. */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
6125
6126 /* One-shot MSI handler - Chip automatically disables interrupt
6127  * after sending MSI so driver doesn't have to do it.
6128  */
6129 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6130 {
6131         struct tg3_napi *tnapi = dev_id;
6132         struct tg3 *tp = tnapi->tp;
6133
6134         prefetch(tnapi->hw_status);
6135         if (tnapi->rx_rcb)
6136                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6137
6138         if (likely(!tg3_irq_sync(tp)))
6139                 napi_schedule(&tnapi->napi);
6140
6141         return IRQ_HANDLED;
6142 }
6143
6144 /* MSI ISR - No need to check for interrupt sharing and no need to
6145  * flush status block and interrupt mailbox. PCI ordering rules
6146  * guarantee that MSI will arrive after the status block.
6147  */
6148 static irqreturn_t tg3_msi(int irq, void *dev_id)
6149 {
6150         struct tg3_napi *tnapi = dev_id;
6151         struct tg3 *tp = tnapi->tp;
6152
6153         prefetch(tnapi->hw_status);
6154         if (tnapi->rx_rcb)
6155                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6156         /*
6157          * Writing any value to intr-mbox-0 clears PCI INTA# and
6158          * chip-internal interrupt pending events.
6159          * Writing non-zero to intr-mbox-0 additional tells the
6160          * NIC to stop sending us irqs, engaging "in-intr-handler"
6161          * event coalescing.
6162          */
6163         tw32_mailbox(tnapi->int_mbox, 0x00000001);
6164         if (likely(!tg3_irq_sync(tp)))
6165                 napi_schedule(&tnapi->napi);
6166
6167         return IRQ_RETVAL(1);
6168 }
6169
/* Legacy INTx interrupt handler for chips using non-tagged status
 * block acknowledgement.  Must tolerate shared interrupt lines.
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;	/* not ours - let the sharer have it */
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
6218
/* Legacy INTx interrupt handler for chips using tagged status block
 * acknowledgement: "ours" is detected by comparing the status tag
 * against the last one we acknowledged.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;	/* not ours - let the sharer have it */
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream.  We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled.  Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))
		goto out;

	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}
6270
6271 /* ISR for interrupt test */
6272 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6273 {
6274         struct tg3_napi *tnapi = dev_id;
6275         struct tg3 *tp = tnapi->tp;
6276         struct tg3_hw_status *sblk = tnapi->hw_status;
6277
6278         if ((sblk->status & SD_STATUS_UPDATED) ||
6279             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6280                 tg3_disable_ints(tp);
6281                 return IRQ_RETVAL(1);
6282         }
6283         return IRQ_RETVAL(0);
6284 }
6285
static int tg3_init_hw(struct tg3 *, int);
static int tg3_halt(struct tg3 *, int, int);

/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 *
 * Returns 0 on success.  On failure the device is halted and closed;
 * note that tp->lock is dropped and re-acquired around dev_close(),
 * hence the sparse lock annotations below.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		tp->irq_sync = 0;
		/* NAPI must be re-enabled before dev_close() so its
		 * teardown path can disable it again cleanly.
		 */
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
6312
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll hook: run every vector's interrupt handler by hand. */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tg3_interrupt(tnapi->irq_vec, tnapi);
	}
}
#endif
6323
/* Work queue handler for deferred chip resets (scheduled e.g. from the
 * TX watchdog via tg3_reset_task_schedule()).  Quiesces the device,
 * then performs a full halt/re-init under the full lock.
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;

	tg3_full_lock(tp, 0);

	if (!netif_running(tp->dev)) {
		/* Device was brought down before the work ran. */
		tg3_flag_clear(tp, RESET_TASK_PENDING);
		tg3_full_unlock(tp);
		return;
	}

	/* Drop the lock while stopping the PHY and the data path. */
	tg3_full_unlock(tp);

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
		/* A TX recovery was requested: switch to flushing mailbox
		 * write handlers and remember the reordering workaround.
		 */
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, 1);
	if (err)
		goto out;

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	/* tg3_phy_start() is called outside the full lock. */
	if (!err)
		tg3_phy_start(tp);

	tg3_flag_clear(tp, RESET_TASK_PENDING);
}
6367
6368 static void tg3_tx_timeout(struct net_device *dev)
6369 {
6370         struct tg3 *tp = netdev_priv(dev);
6371
6372         if (netif_msg_tx_err(tp)) {
6373                 netdev_err(dev, "transmit timed out, resetting\n");
6374                 tg3_dump_state(tp);
6375         }
6376
6377         tg3_reset_task_schedule(tp);
6378 }
6379
6380 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
6381 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6382 {
6383         u32 base = (u32) mapping & 0xffffffff;
6384
6385         return (base > 0xffffdcc0) && (base + len + 8 < base);
6386 }
6387
6388 /* Test for DMA addresses > 40-bit */
6389 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6390                                           int len)
6391 {
6392 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6393         if (tg3_flag(tp, 40BIT_DMA_BUG))
6394                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
6395         return 0;
6396 #else
6397         return 0;
6398 #endif
6399 }
6400
6401 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
6402                                  dma_addr_t mapping, u32 len, u32 flags,
6403                                  u32 mss, u32 vlan)
6404 {
6405         txbd->addr_hi = ((u64) mapping >> 32);
6406         txbd->addr_lo = ((u64) mapping & 0xffffffff);
6407         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6408         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
6409 }
6410
6411 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
6412                             dma_addr_t map, u32 len, u32 flags,
6413                             u32 mss, u32 vlan)
6414 {
6415         struct tg3 *tp = tnapi->tp;
6416         bool hwbug = false;
6417
6418         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6419                 hwbug = 1;
6420
6421         if (tg3_4g_overflow_test(map, len))
6422                 hwbug = 1;
6423
6424         if (tg3_40bit_overflow_test(tp, map, len))
6425                 hwbug = 1;
6426
6427         if (tg3_flag(tp, 4K_FIFO_LIMIT)) {
6428                 u32 prvidx = *entry;
6429                 u32 tmp_flag = flags & ~TXD_FLAG_END;
6430                 while (len > TG3_TX_BD_DMA_MAX && *budget) {
6431                         u32 frag_len = TG3_TX_BD_DMA_MAX;
6432                         len -= TG3_TX_BD_DMA_MAX;
6433
6434                         /* Avoid the 8byte DMA problem */
6435                         if (len <= 8) {
6436                                 len += TG3_TX_BD_DMA_MAX / 2;
6437                                 frag_len = TG3_TX_BD_DMA_MAX / 2;
6438                         }
6439
6440                         tnapi->tx_buffers[*entry].fragmented = true;
6441
6442                         tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6443                                       frag_len, tmp_flag, mss, vlan);
6444                         *budget -= 1;
6445                         prvidx = *entry;
6446                         *entry = NEXT_TX(*entry);
6447
6448                         map += frag_len;
6449                 }
6450
6451                 if (len) {
6452                         if (*budget) {
6453                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6454                                               len, flags, mss, vlan);
6455                                 *budget -= 1;
6456                                 *entry = NEXT_TX(*entry);
6457                         } else {
6458                                 hwbug = 1;
6459                                 tnapi->tx_buffers[prvidx].fragmented = false;
6460                         }
6461                 }
6462         } else {
6463                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6464                               len, flags, mss, vlan);
6465                 *entry = NEXT_TX(*entry);
6466         }
6467
6468         return hwbug;
6469 }
6470
/* Unmap the DMA buffers of a previously queued skb starting at ring
 * index @entry.  @last is the index of the skb's final page fragment,
 * or -1 when only the linear head was mapped.  Extra BDs produced by
 * tg3_tx_frag_set() splitting (marked ->fragmented) are skipped over
 * and unmarked as they are passed.
 */
static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
{
	int i;
	struct sk_buff *skb;
	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];

	skb = txb->skb;
	txb->skb = NULL;

	/* The first BD maps the linear portion of the skb. */
	pci_unmap_single(tnapi->tp->pdev,
			 dma_unmap_addr(txb, mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);

	/* Skip any extra BDs the head segment was split into. */
	while (txb->fragmented) {
		txb->fragmented = false;
		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];
	}

	for (i = 0; i <= last; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];

		pci_unmap_page(tnapi->tp->pdev,
			       dma_unmap_addr(txb, mapping),
			       skb_frag_size(frag), PCI_DMA_TODEVICE);

		/* Again, skip any split-off BDs for this fragment. */
		while (txb->fragmented) {
			txb->fragmented = false;
			entry = NEXT_TX(entry);
			txb = &tnapi->tx_buffers[entry];
		}
	}
}
6508
/* Workaround 4GB and 40-bit hardware DMA bugs.
 *
 * Copies the skb into a fresh linear skb (with extra headroom for
 * 4-byte alignment on 5701) and queues it at *entry.  On success
 * *pskb points at the new skb and 0 is returned; on any failure -1
 * is returned.  The original skb is always freed.
 */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
				       struct sk_buff **pskb,
				       u32 *entry, u32 *budget,
				       u32 base_flags, u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	struct sk_buff *new_skb, *skb = *pskb;
	dma_addr_t new_addr = 0;
	int ret = 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		/* 5701: copy with enough extra headroom to realign
		 * the data to a 4-byte boundary.
		 */
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure the mapping succeeded */
		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
			dev_kfree_skb(new_skb);
			ret = -1;
		} else {
			u32 save_entry = *entry;

			/* The copy is a single segment: it both starts
			 * and ends the packet.
			 */
			base_flags |= TXD_FLAG_END;

			tnapi->tx_buffers[*entry].skb = new_skb;
			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
					   mapping, new_addr);

			/* If even the linear copy trips a DMA bug (or the
			 * budget runs out), unwind and give up.
			 */
			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
					    new_skb->len, base_flags,
					    mss, vlan)) {
				tg3_tx_skb_unmap(tnapi, save_entry, -1);
				dev_kfree_skb(new_skb);
				ret = -1;
			}
		}
	}

	/* The original skb is consumed in every case. */
	dev_kfree_skb(skb);
	*pskb = new_skb;
	return ret;
}
6563
6564 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6565
/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 *
 * Software-segments @skb and transmits the resulting packets one at a
 * time through tg3_start_xmit().  Consumes @skb and returns
 * NETDEV_TX_OK, except when the ring cannot hold the estimated
 * descriptor count: then the queue is stopped and NETDEV_TX_BUSY is
 * returned with @skb untouched.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;
	/* Worst-case estimate: three descriptors per resulting segment. */
	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
		netif_stop_queue(tp->dev);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
			return NETDEV_TX_BUSY;

		netif_wake_queue(tp->dev);
	}

	/* Segment without TSO so each resulting skb is MSS sized. */
	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (IS_ERR(segs))
		goto tg3_tso_bug_end;

	/* Transmit each segment; tg3_start_xmit() consumes its skb. */
	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
6606
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
 *
 * Maps the skb (head + page fragments), fills TX BDs via
 * tg3_tx_frag_set(), and rings the TX mailbox.  Any segment that trips
 * a hardware DMA bug is retried through the linearizing workaround.
 * Returns NETDEV_TX_OK (dropping on unrecoverable errors) or
 * NETDEV_TX_BUSY when the ring is unexpectedly full.
 */
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss, vlan = 0;
	u32 budget;
	int i = -1, would_hit_hwbug;
	dma_addr_t mapping;
	struct tg3_napi *tnapi;
	struct netdev_queue *txq;
	unsigned int last;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
	/* NOTE(review): with TSS enabled the TX vectors appear to be
	 * offset by one from the queue mapping -- confirm against the
	 * IRQ setup code.
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	budget = tg3_tx_avail(tnapi);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it. */
			netdev_err(dev,
				   "BUG! Tx Ring full when queue awake!\n");
		}
		return NETDEV_TX_BUSY;
	}

	entry = tnapi->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;

	mss = skb_shinfo(skb)->gso_size;
	if (mss) {
		/* TSO path: compute the header length and patch the
		 * IP/TCP headers as each hardware TSO flavor requires.
		 */
		struct iphdr *iph;
		u32 tcp_opt_len, hdr_len;

		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			goto drop;

		iph = ip_hdr(skb);
		tcp_opt_len = tcp_optlen(skb);

		if (skb_is_gso_v6(skb)) {
			hdr_len = skb_headlen(skb) - ETH_HLEN;
		} else {
			u32 ip_tcp_len;

			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
			hdr_len = ip_tcp_len + tcp_opt_len;

			/* Zeroed/patched so hardware can recompute the
			 * IP fields per generated segment.
			 */
			iph->check = 0;
			iph->tot_len = htons(mss + hdr_len);
		}

		/* Headers over 80 bytes trip a TSO bug on some chips;
		 * fall back to GSO software segmentation.
		 */
		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
		    tg3_flag(tp, TSO_BUG))
			return tg3_tso_bug(tp, skb);

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else
			/* Firmware TSO: seed the TCP pseudo-header sum. */
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);

		/* Encode hdr_len into mss/base_flags in the layout each
		 * hardware TSO generation expects.
		 */
		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	}

	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
		base_flags |= TXD_FLAG_JMB_PKT;

	if (vlan_tx_tag_present(skb)) {
		base_flags |= TXD_FLAG_VLAN;
		vlan = vlan_tx_tag_get(skb);
	}

	len = skb_headlen(skb);

	/* Map the linear portion of the skb. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping))
		goto drop;


	tnapi->tx_buffers[entry].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_flag(tp, 5701_DMA_BUG))
		would_hit_hwbug = 1;

	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
			    mss, vlan)) {
		would_hit_hwbug = 1;
	/* Now loop through additional data fragments, and queue them. */
	} else if (skb_shinfo(skb)->nr_frags > 0) {
		u32 tmp_mss = mss;

		/* Only hardware TSO wants the mss in fragment BDs. */
		if (!tg3_flag(tp, HW_TSO_1) &&
		    !tg3_flag(tp, HW_TSO_2) &&
		    !tg3_flag(tp, HW_TSO_3))
			tmp_mss = 0;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = skb_frag_size(frag);
			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
						   len, DMA_TO_DEVICE);

			tnapi->tx_buffers[entry].skb = NULL;
			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   mapping);
			if (dma_mapping_error(&tp->pdev->dev, mapping))
				goto dma_error;

			if (!budget ||
			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
					    len, base_flags |
					    ((i == last) ? TXD_FLAG_END : 0),
					    tmp_mss, vlan)) {
				would_hit_hwbug = 1;
				break;
			}
		}
	}

	if (would_hit_hwbug) {
		/* Unwind everything queued so far, then retry through
		 * the linearizing copy workaround.
		 */
		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		entry = tnapi->tx_prod;
		budget = tg3_tx_avail(tnapi);
		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
						base_flags, mss, vlan))
			goto drop_nofree;
	}

	skb_tx_timestamp(skb);

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox(tnapi->prodmbox, entry);

	tnapi->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}

	mmiowb();
	return NETDEV_TX_OK;

dma_error:
	/* Fragment i failed to map: unmap head plus fragments 0..i-1. */
	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
drop:
	dev_kfree_skb(skb);
drop_nofree:
	tp->tx_dropped++;
	return NETDEV_TX_OK;
}
6822
6823 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
6824 {
6825         if (enable) {
6826                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
6827                                   MAC_MODE_PORT_MODE_MASK);
6828
6829                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
6830
6831                 if (!tg3_flag(tp, 5705_PLUS))
6832                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
6833
6834                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
6835                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
6836                 else
6837                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
6838         } else {
6839                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
6840
6841                 if (tg3_flag(tp, 5705_PLUS) ||
6842                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
6843                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
6844                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
6845         }
6846
6847         tw32(MAC_MODE, tp->mac_mode);
6848         udelay(40);
6849 }
6850
/* Put the PHY into loopback at the requested speed.  When @extlpbk is
 * set, external loopback is configured (via tg3_phy_set_extloopbk())
 * instead of BMCR internal loopback.  Returns 0 on success, or -EIO
 * if external loopback setup fails.
 */
static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
{
	u32 val, bmcr, mac_mode, ptest = 0;

	tg3_phy_toggle_apd(tp, false);
	tg3_phy_toggle_automdix(tp, 0);

	if (extlpbk && tg3_phy_set_extloopbk(tp))
		return -EIO;

	/* Build the BMCR value: always full duplex at the given speed. */
	bmcr = BMCR_FULLDPLX;
	switch (speed) {
	case SPEED_10:
		break;
	case SPEED_100:
		bmcr |= BMCR_SPEED100;
		break;
	case SPEED_1000:
	default:
		/* FET PHYs are clamped to 100Mbps here. */
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			speed = SPEED_100;
			bmcr |= BMCR_SPEED100;
		} else {
			speed = SPEED_1000;
			bmcr |= BMCR_SPEED1000;
		}
	}

	if (extlpbk) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			/* Force 1000BASE-T master mode. */
			tg3_readphy(tp, MII_CTRL1000, &val);
			val |= CTL1000_AS_MASTER |
			       CTL1000_ENABLE_MASTER;
			tg3_writephy(tp, MII_CTRL1000, val);
		} else {
			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
				MII_TG3_FET_PTEST_TRIM_2;
			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
		}
	} else
		bmcr |= BMCR_LOOPBACK;

	tg3_writephy(tp, MII_BMCR, bmcr);

	/* The write needs to be flushed for the FETs */
	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tg3_readphy(tp, MII_BMCR, &bmcr);

	udelay(40);

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		/* Force TX link/lock on 5785 FET PHYs. */
		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
			     MII_TG3_FET_PTEST_FRC_TX_LINK |
			     MII_TG3_FET_PTEST_FRC_TX_LOCK);

		/* The write needs to be flushed for the AC131 */
		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
	}

	/* Reset to prevent losing 1st rx packet intermittently */
	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_flag(tp, 5780_CLASS)) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	/* Program the MAC port mode to match the loopback speed. */
	mac_mode = tp->mac_mode &
		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	if (speed == SPEED_1000)
		mac_mode |= MAC_MODE_PORT_MODE_GMII;
	else
		mac_mode |= MAC_MODE_PORT_MODE_MII;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;

		/* Per-PHY link polarity quirks on 5700. */
		if (masked_phy_id == TG3_PHY_ID_BCM5401)
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
			mac_mode |= MAC_MODE_LINK_POLARITY;

		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
	}

	tw32(MAC_MODE, mac_mode);
	udelay(40);

	return 0;
}
6943
6944 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
6945 {
6946         struct tg3 *tp = netdev_priv(dev);
6947
6948         if (features & NETIF_F_LOOPBACK) {
6949                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
6950                         return;
6951
6952                 spin_lock_bh(&tp->lock);
6953                 tg3_mac_loopback(tp, true);
6954                 netif_carrier_on(tp->dev);
6955                 spin_unlock_bh(&tp->lock);
6956                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
6957         } else {
6958                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
6959                         return;
6960
6961                 spin_lock_bh(&tp->lock);
6962                 tg3_mac_loopback(tp, false);
6963                 /* Force link status check */
6964                 tg3_setup_phy(tp, 1);
6965                 spin_unlock_bh(&tp->lock);
6966                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
6967         }
6968 }
6969
6970 static netdev_features_t tg3_fix_features(struct net_device *dev,
6971         netdev_features_t features)
6972 {
6973         struct tg3 *tp = netdev_priv(dev);
6974
6975         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
6976                 features &= ~NETIF_F_ALL_TSO;
6977
6978         return features;
6979 }
6980
6981 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
6982 {
6983         netdev_features_t changed = dev->features ^ features;
6984
6985         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
6986                 tg3_set_loopback(dev, features);
6987
6988         return 0;
6989 }
6990
/* Record a new MTU and update the jumbo-frame related flags.  The
 * caller is responsible for any required chip restart.
 */
static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
			       int new_mtu)
{
	dev->mtu = new_mtu;

	if (new_mtu > ETH_DATA_LEN) {
		if (tg3_flag(tp, 5780_CLASS)) {
			/* netdev_update_features() re-runs
			 * tg3_fix_features(), which strips the TSO bits
			 * at jumbo MTU on 5780-class parts.
			 */
			netdev_update_features(dev);
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else {
			tg3_flag_set(tp, JUMBO_RING_ENABLE);
		}
	} else {
		if (tg3_flag(tp, 5780_CLASS)) {
			/* Restore TSO capability before re-evaluating
			 * features at the standard MTU.
			 */
			tg3_flag_set(tp, TSO_CAPABLE);
			netdev_update_features(dev);
		}
		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
	}
}
7011
/* ndo_change_mtu handler: validate the new MTU and, if the interface
 * is up, halt and restart the hardware with the new configuration.
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	/* Note: tg3_restart_hw() may drop and retake the full lock on
	 * failure (it closes the device in that case).
	 */
	err = tg3_restart_hw(tp, 0);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
7050
/* Free all RX data buffers attached to a producer ring set.
 *
 * For any ring other than napi[0]'s default prodring, only the window
 * between the consumer and producer indices holds live buffers; the
 * default ring frees every slot (jumbo slots too, unless this is a
 * 5780-class part).
 */
static void tg3_rx_prodring_free(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	int i;

	if (tpr != &tp->napi[0].prodring) {
		/* Free only the cons..prod window of live buffers. */
		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
		     i = (i + 1) & tp->rx_std_ring_mask)
			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
					tp->rx_pkt_map_sz);

		if (tg3_flag(tp, JUMBO_CAPABLE)) {
			for (i = tpr->rx_jmb_cons_idx;
			     i != tpr->rx_jmb_prod_idx;
			     i = (i + 1) & tp->rx_jmb_ring_mask) {
				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
						TG3_RX_JMB_MAP_SZ);
			}
		}

		return;
	}

	/* Default ring: free every standard slot. */
	for (i = 0; i <= tp->rx_std_ring_mask; i++)
		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
				tp->rx_pkt_map_sz);

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
					TG3_RX_JMB_MAP_SZ);
	}
}
7084
/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0 on success.  Returns -ENOMEM only when not even one
 * buffer could be allocated for a ring; a partial allocation just
 * shrinks tp->rx_pending / tp->rx_jumbo_pending and succeeds.
 */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	u32 i, rx_pkt_dma_sz;

	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;
	tpr->rx_jmb_cons_idx = 0;
	tpr->rx_jmb_prod_idx = 0;

	/* Only the first vector's prodring owns descriptor memory.
	 * For the other vectors just clear the shadow buffer arrays.
	 */
	if (tpr != &tp->napi[0].prodring) {
		memset(&tpr->rx_std_buffers[0], 0,
		       TG3_RX_STD_BUFF_RING_SIZE(tp));
		if (tpr->rx_jmb_buffers)
			memset(&tpr->rx_jmb_buffers[0], 0,
			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
		goto done;
	}

	/* Zero out all descriptors. */
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

	/* 5780-class chips service jumbo frames from the standard
	 * ring, so its buffers must be sized for jumbo DMA when the
	 * MTU requires it.
	 */
	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
	if (tg3_flag(tp, 5780_CLASS) &&
	    tp->dev->mtu > ETH_DATA_LEN)
		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_std[i];
		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX standard ring. Only "
				    "%d out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_pending);
			if (i == 0)
				goto initfail;
			/* Run with the buffers we did manage to get. */
			tp->rx_pending = i;
			break;
		}
	}

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		goto done;

	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
		goto done;

	/* Same invariant setup for the dedicated jumbo ring. */
	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_jmb[i].std;
		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				  RXD_FLAG_JUMBO;
		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
		       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	for (i = 0; i < tp->rx_jumbo_pending; i++) {
		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX jumbo ring. Only %d "
				    "out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_jumbo_pending);
			if (i == 0)
				goto initfail;
			tp->rx_jumbo_pending = i;
			break;
		}
	}

done:
	return 0;

initfail:
	/* Unwind any buffers allocated before the hard failure. */
	tg3_rx_prodring_free(tp, tpr);
	return -ENOMEM;
}
7187
7188 static void tg3_rx_prodring_fini(struct tg3 *tp,
7189                                  struct tg3_rx_prodring_set *tpr)
7190 {
7191         kfree(tpr->rx_std_buffers);
7192         tpr->rx_std_buffers = NULL;
7193         kfree(tpr->rx_jmb_buffers);
7194         tpr->rx_jmb_buffers = NULL;
7195         if (tpr->rx_std) {
7196                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7197                                   tpr->rx_std, tpr->rx_std_mapping);
7198                 tpr->rx_std = NULL;
7199         }
7200         if (tpr->rx_jmb) {
7201                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7202                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
7203                 tpr->rx_jmb = NULL;
7204         }
7205 }
7206
7207 static int tg3_rx_prodring_init(struct tg3 *tp,
7208                                 struct tg3_rx_prodring_set *tpr)
7209 {
7210         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7211                                       GFP_KERNEL);
7212         if (!tpr->rx_std_buffers)
7213                 return -ENOMEM;
7214
7215         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7216                                          TG3_RX_STD_RING_BYTES(tp),
7217                                          &tpr->rx_std_mapping,
7218                                          GFP_KERNEL);
7219         if (!tpr->rx_std)
7220                 goto err_out;
7221
7222         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7223                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7224                                               GFP_KERNEL);
7225                 if (!tpr->rx_jmb_buffers)
7226                         goto err_out;
7227
7228                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7229                                                  TG3_RX_JMB_RING_BYTES(tp),
7230                                                  &tpr->rx_jmb_mapping,
7231                                                  GFP_KERNEL);
7232                 if (!tpr->rx_jmb)
7233                         goto err_out;
7234         }
7235
7236         return 0;
7237
7238 err_out:
7239         tg3_rx_prodring_fini(tp, tpr);
7240         return -ENOMEM;
7241 }
7242
7243 /* Free up pending packets in all rx/tx rings.
7244  *
7245  * The chip has been shut down and the driver detached from
7246  * the networking, so no interrupts or new tx packets will
7247  * end up in the driver.  tp->{tx,}lock is not held and we are not
7248  * in an interrupt context and thus may sleep.
7249  */
7250 static void tg3_free_rings(struct tg3 *tp)
7251 {
7252         int i, j;
7253
7254         for (j = 0; j < tp->irq_cnt; j++) {
7255                 struct tg3_napi *tnapi = &tp->napi[j];
7256
7257                 tg3_rx_prodring_free(tp, &tnapi->prodring);
7258
7259                 if (!tnapi->tx_buffers)
7260                         continue;
7261
7262                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7263                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7264
7265                         if (!skb)
7266                                 continue;
7267
7268                         tg3_tx_skb_unmap(tnapi, i,
7269                                          skb_shinfo(skb)->nr_frags - 1);
7270
7271                         dev_kfree_skb_any(skb);
7272                 }
7273         }
7274 }
7275
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0 on success or -ENOMEM when repopulating a producer
 * ring fails; on failure all rings are freed again.
 */
static int tg3_init_rings(struct tg3 *tp)
{
	int i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* Reset per-vector interrupt tag state and clear the
		 * status block the chip DMAs into.
		 */
		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
		tnapi->hw_status->status = 0;
		tnapi->hw_status->status_tag = 0;
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		tnapi->tx_prod = 0;
		tnapi->tx_cons = 0;
		if (tnapi->tx_ring)
			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);

		tnapi->rx_rcb_ptr = 0;
		if (tnapi->rx_rcb)
			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));

		if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
			/* Unwind every vector, including earlier ones. */
			tg3_free_rings(tp);
			return -ENOMEM;
		}
	}

	return 0;
}
7316
7317 /*
7318  * Must not be invoked with interrupt sources disabled and
7319  * the hardware shutdown down.
7320  */
7321 static void tg3_free_consistent(struct tg3 *tp)
7322 {
7323         int i;
7324
7325         for (i = 0; i < tp->irq_cnt; i++) {
7326                 struct tg3_napi *tnapi = &tp->napi[i];
7327
7328                 if (tnapi->tx_ring) {
7329                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7330                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
7331                         tnapi->tx_ring = NULL;
7332                 }
7333
7334                 kfree(tnapi->tx_buffers);
7335                 tnapi->tx_buffers = NULL;
7336
7337                 if (tnapi->rx_rcb) {
7338                         dma_free_coherent(&tp->pdev->dev,
7339                                           TG3_RX_RCB_RING_BYTES(tp),
7340                                           tnapi->rx_rcb,
7341                                           tnapi->rx_rcb_mapping);
7342                         tnapi->rx_rcb = NULL;
7343                 }
7344
7345                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7346
7347                 if (tnapi->hw_status) {
7348                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7349                                           tnapi->hw_status,
7350                                           tnapi->status_mapping);
7351                         tnapi->hw_status = NULL;
7352                 }
7353         }
7354
7355         if (tp->hw_stats) {
7356                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
7357                                   tp->hw_stats, tp->stats_mapping);
7358                 tp->hw_stats = NULL;
7359         }
7360 }
7361
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.  Can sleep.
 *
 * Allocates the DMA-coherent statistics block, and per interrupt
 * vector: a status block, an rx producer ring set, and (where the
 * vector handles tx and/or rx) the tx ring plus buffer bookkeeping
 * and the rx return ring.  Returns 0 on success or -ENOMEM, in
 * which case everything allocated so far is freed again.
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	int i;

	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
					  sizeof(struct tg3_hw_stats),
					  &tp->stats_mapping,
					  GFP_KERNEL);
	if (!tp->hw_stats)
		goto err_out;

	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		struct tg3_hw_status *sblk;

		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
						      TG3_HW_STATUS_SIZE,
						      &tnapi->status_mapping,
						      GFP_KERNEL);
		if (!tnapi->hw_status)
			goto err_out;

		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
		sblk = tnapi->hw_status;

		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
			goto err_out;

		/* If multivector TSS is enabled, vector 0 does not handle
		 * tx interrupts.  Don't allocate any resources for it.
		 */
		if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
		    (i && tg3_flag(tp, ENABLE_TSS))) {
			tnapi->tx_buffers = kzalloc(
					       sizeof(struct tg3_tx_ring_info) *
					       TG3_TX_RING_SIZE, GFP_KERNEL);
			if (!tnapi->tx_buffers)
				goto err_out;

			tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
							    TG3_TX_RING_BYTES,
							&tnapi->tx_desc_mapping,
							    GFP_KERNEL);
			if (!tnapi->tx_ring)
				goto err_out;
		}

		/*
		 * When RSS is enabled, the status block format changes
		 * slightly.  The "rx_jumbo_consumer", "reserved",
		 * and "rx_mini_consumer" members get mapped to the
		 * other three rx return ring producer indexes.
		 */
		switch (i) {
		default:
			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
			break;
		case 2:
			tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
			break;
		case 3:
			tnapi->rx_rcb_prod_idx = &sblk->reserved;
			break;
		case 4:
			tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
			break;
		}

		/*
		 * If multivector RSS is enabled, vector 0 does not handle
		 * rx or tx interrupts.  Don't allocate any resources for it.
		 */
		if (!i && tg3_flag(tp, ENABLE_RSS))
			continue;

		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
						   TG3_RX_RCB_RING_BYTES(tp),
						   &tnapi->rx_rcb_mapping,
						   GFP_KERNEL);
		if (!tnapi->rx_rcb)
			goto err_out;

		memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	}

	return 0;

err_out:
	/* Partial failure: release everything allocated so far. */
	tg3_free_consistent(tp);
	return -ENOMEM;
}
7459
/* Poll budget when waiting for hardware bits to clear: up to 1000
 * iterations of udelay(100), i.e. roughly 100ms total.
 */
#define MAX_WAIT_CNT 1000
7461
/* To stop a block, clear the enable bit and poll till it
 * clears.  tp->lock is held.
 *
 * @ofs:        register offset of the block's mode register
 * @enable_bit: enable bit to clear and then poll on
 * @silent:     when non-zero, suppress the timeout message (and the
 *              -ENODEV return; the timeout is then ignored)
 *
 * Returns 0 on success, or -ENODEV on a non-silent timeout after
 * MAX_WAIT_CNT polls of 100us each.
 */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
{
	unsigned int i;
	u32 val;

	if (tg3_flag(tp, 5705_PLUS)) {
		switch (ofs) {
		case RCVLSC_MODE:
		case DMAC_MODE:
		case MBFREE_MODE:
		case BUFMGR_MODE:
		case MEMARB_MODE:
			/* We can't enable/disable these bits of the
			 * 5705/5750, just say success.
			 */
			return 0;

		default:
			break;
		}
	}

	/* Clear the enable bit and flush the write. */
	val = tr32(ofs);
	val &= ~enable_bit;
	tw32_f(ofs, val);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		val = tr32(ofs);
		if ((val & enable_bit) == 0)
			break;
	}

	if (i == MAX_WAIT_CNT && !silent) {
		dev_err(&tp->pdev->dev,
			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
			ofs, enable_bit);
		return -ENODEV;
	}

	return 0;
}
7507
/* tp->lock is held.
 *
 * Shut down the chip's rx and tx data paths: disable interrupts,
 * stop the receive MAC, then stop each rx/tx/DMA block in turn,
 * reset the FTQs, and finally clear the status and statistics
 * blocks.  Per-block errors are OR-ed together into the return
 * value; @silent is forwarded to tg3_stop_block() to quiet the
 * timeout messages on best-effort shutdowns.
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	/* Stop accepting new packets first. */
	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	/* Stop the receive-side blocks... */
	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	/* ...then the send-side and DMA blocks. */
	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* The transmit MAC has no tg3_stop_block() entry; poll its
	 * enable bit directly.
	 */
	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		dev_err(&tp->pdev->dev,
			"%s timed out, TX_MODE_ENABLE will not clear "
			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	/* Pulse-reset the flow-through queues. */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	/* With DMA quiesced, clear the host-visible status blocks. */
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status)
			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}
	if (tp->hw_stats)
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return err;
}
7573
/* Save PCI command register before chip reset; the reset can clear
 * the memory-enable bit (see tg3_chip_reset()).
 */
static void tg3_save_pci_state(struct tg3 *tp)
{
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
7579
/* Restore PCI state after chip reset: indirect access enables, the
 * saved PCI command word, bus-specific tuning (PCIe read request
 * size or PCI cache line/latency), PCI-X ordering, and the MSI
 * enable bit that a 5780-class reset clears.
 */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tg3_flag(tp, ENABLE_APE))
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	/* Restore the command word saved by tg3_save_pci_state(). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
		if (tg3_flag(tp, PCI_EXPRESS))
			pcie_set_readrq(tp->pdev, tp->pcie_readrq);
		else {
			pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
					      tp->pci_cacheline_sz);
			pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
					      tp->pci_lat_timer);
		}
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tg3_flag(tp, 5780_CLASS)) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tg3_flag(tp, USING_MSI)) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
7644
/* tp->lock is held.
 *
 * Perform a GRC core-clock reset of the chip, then bring the basics
 * back up: PCI config state, memory arbiter, GRC mode, MAC mode,
 * and the ASF flags re-probed from NIC SRAM.  Returns 0 on success
 * or the error from tg3_poll_fw().  The exact ordering of register
 * accesses and delays below is chip-errata driven; do not reorder.
 */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int i, err;

	tg3_nvram_lock(tp);

	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    tg3_flag(tp, 5755_PLUS))
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tg3_flag_set(tp, CHIP_RESETTING);
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status) {
			tnapi->hw_status->status = 0;
			tnapi->hw_status->status_tag = 0;
		}
		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
	}
	smp_mb();

	/* Make sure no in-flight handler is still using the device. */
	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Force PCIe 1.0a mode */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS) &&
		    tr32(TG3_PCIE_PHY_TSTCTL) ==
		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);

		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	/* Manage gphy power for all CPMU absent PCIe devices. */
	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;

	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
		u16 val16;

		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			int i;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (i = 0; i < 5000; i++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}

		/* Clear the "no snoop" and "relaxed ordering" bits. */
		pci_read_config_word(tp->pdev,
				     pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
				     &val16);
		val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
			   PCI_EXP_DEVCTL_NOSNOOP_EN);
		/*
		 * Older PCIe devices only support the 128 byte
		 * MPS setting.  Enforce the restriction.
		 */
		if (!tg3_flag(tp, CPMU_PRESENT))
			val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
		pci_write_config_word(tp->pdev,
				      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
				      val16);

		pcie_set_readrq(tp->pdev, tp->pcie_readrq);

		/* Clear error status */
		pci_write_config_word(tp->pdev,
				      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
				      PCI_EXP_DEVSTA_CED |
				      PCI_EXP_DEVSTA_NFED |
				      PCI_EXP_DEVSTA_FED |
				      PCI_EXP_DEVSTA_URD);
	}

	tg3_restore_pci_state(tp);

	tg3_flag_clear(tp, CHIP_RESETTING);
	tg3_flag_clear(tp, ERROR_PROCESSED);

	val = 0;
	if (tg3_flag(tp, 5780_CLASS))
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	/* Select the MAC port mode matching the PHY attachment. */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		val = tp->mac_mode;
	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		val = tp->mac_mode;
	} else
		val = 0;

	tw32_f(MAC_MODE, val);
	udelay(40);

	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

	err = tg3_poll_fw(tp);
	if (err)
		return err;

	tg3_mdio_start(tp);

	if (tg3_flag(tp, PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		val = tr32(TG3_CPMU_CLCK_ORIDE);
		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
	}

	/* Reprobe ASF enable state.  */
	tg3_flag_clear(tp, ENABLE_ASF);
	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			tp->last_event_jiffies = jiffies;
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}
	}

	return 0;
}
7890
/* tp->lock is held.
 *
 * Bring the chip to a quiescent, freshly-reset state: stop the
 * firmware, signal the reset @kind to the firmware signature
 * mailboxes, abort all DMA activity, and reset the chip.
 *
 * @kind:   reset kind forwarded to the tg3_write_sig_* helpers.
 * @silent: forwarded to tg3_abort_hw() to suppress per-block
 *          shutdown timeout messages.
 *
 * Returns 0 on success or the error from tg3_chip_reset().
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	__tg3_set_mac_addr(tp, 0);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	/* The post-reset signatures are written even on error so the
	 * firmware handshake stays consistent; only then report it.
	 */
	return err;
}
7913
7914 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7915 {
7916         struct tg3 *tp = netdev_priv(dev);
7917         struct sockaddr *addr = p;
7918         int err = 0, skip_mac_1 = 0;
7919
7920         if (!is_valid_ether_addr(addr->sa_data))
7921                 return -EINVAL;
7922
7923         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7924
7925         if (!netif_running(dev))
7926                 return 0;
7927
7928         if (tg3_flag(tp, ENABLE_ASF)) {
7929                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7930
7931                 addr0_high = tr32(MAC_ADDR_0_HIGH);
7932                 addr0_low = tr32(MAC_ADDR_0_LOW);
7933                 addr1_high = tr32(MAC_ADDR_1_HIGH);
7934                 addr1_low = tr32(MAC_ADDR_1_LOW);
7935
7936                 /* Skip MAC addr 1 if ASF is using it. */
7937                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7938                     !(addr1_high == 0 && addr1_low == 0))
7939                         skip_mac_1 = 1;
7940         }
7941         spin_lock_bh(&tp->lock);
7942         __tg3_set_mac_addr(tp, skip_mac_1);
7943         spin_unlock_bh(&tp->lock);
7944
7945         return err;
7946 }
7947
7948 /* tp->lock is held. */
7949 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7950                            dma_addr_t mapping, u32 maxlen_flags,
7951                            u32 nic_addr)
7952 {
7953         tg3_write_mem(tp,
7954                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7955                       ((u64) mapping >> 32));
7956         tg3_write_mem(tp,
7957                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7958                       ((u64) mapping & 0xffffffff));
7959         tg3_write_mem(tp,
7960                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7961                        maxlen_flags);
7962
7963         if (!tg3_flag(tp, 5705_PLUS))
7964                 tg3_write_mem(tp,
7965                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7966                               nic_addr);
7967 }
7968
7969 static void __tg3_set_rx_mode(struct net_device *);
7970 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7971 {
7972         int i;
7973
7974         if (!tg3_flag(tp, ENABLE_TSS)) {
7975                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7976                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7977                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7978         } else {
7979                 tw32(HOSTCC_TXCOL_TICKS, 0);
7980                 tw32(HOSTCC_TXMAX_FRAMES, 0);
7981                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
7982         }
7983
7984         if (!tg3_flag(tp, ENABLE_RSS)) {
7985                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7986                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7987                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7988         } else {
7989                 tw32(HOSTCC_RXCOL_TICKS, 0);
7990                 tw32(HOSTCC_RXMAX_FRAMES, 0);
7991                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
7992         }
7993
7994         if (!tg3_flag(tp, 5705_PLUS)) {
7995                 u32 val = ec->stats_block_coalesce_usecs;
7996
7997                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7998                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7999
8000                 if (!netif_carrier_ok(tp->dev))
8001                         val = 0;
8002
8003                 tw32(HOSTCC_STAT_COAL_TICKS, val);
8004         }
8005
8006         for (i = 0; i < tp->irq_cnt - 1; i++) {
8007                 u32 reg;
8008
8009                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8010                 tw32(reg, ec->rx_coalesce_usecs);
8011                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8012                 tw32(reg, ec->rx_max_coalesced_frames);
8013                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8014                 tw32(reg, ec->rx_max_coalesced_frames_irq);
8015
8016                 if (tg3_flag(tp, ENABLE_TSS)) {
8017                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8018                         tw32(reg, ec->tx_coalesce_usecs);
8019                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8020                         tw32(reg, ec->tx_max_coalesced_frames);
8021                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8022                         tw32(reg, ec->tx_max_coalesced_frames_irq);
8023                 }
8024         }
8025
8026         for (; i < tp->irq_max - 1; i++) {
8027                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8028                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8029                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8030
8031                 if (tg3_flag(tp, ENABLE_TSS)) {
8032                         tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8033                         tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8034                         tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8035                 }
8036         }
8037 }
8038
/* tp->lock is held.
 *
 * Disable every tx/rx-return ring control block except the first,
 * reset all producer/consumer mailboxes, then reprogram the status
 * block addresses and ring control blocks for each active vector.
 */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk, txrcb, rxrcb, limit;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* Disable all transmit rings but the first.  The number of send
	 * RCBs in NIC SRAM differs per chip family.
	 */
	if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
	else
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;

	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);


	/* Disable all receive return rings but the first. */
	if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
	else if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
	else
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable interrupts */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
	/* Reset vector 0 bookkeeping used by the missed-MSI check. */
	tp->napi[0].chk_msi_cnt = 0;
	tp->napi[0].last_rx_cons = 0;
	tp->napi[0].last_tx_cons = 0;

	/* Zero mailbox registers. */
	if (tg3_flag(tp, SUPPORT_MSIX)) {
		for (i = 1; i < tp->irq_max; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
			if (tg3_flag(tp, ENABLE_TSS))
				tw32_mailbox(tp->napi[i].prodmbox, 0);
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
			tp->napi[i].chk_msi_cnt = 0;
			tp->napi[i].last_rx_cons = 0;
			tp->napi[i].last_tx_cons = 0;
		}
		/* Without TSS, transmit still uses vector 0's producer mailbox. */
		if (!tg3_flag(tp, ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	txrcb = NIC_SRAM_SEND_RCB;
	rxrcb = NIC_SRAM_RCV_RET_RCB;

	/* Clear status block in ram. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

	/* Reprogram ring control blocks for vector 0's tx/rx-return rings. */
	if (tnapi->tx_ring) {
		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
			       (TG3_TX_RING_SIZE <<
				BDINFO_FLAGS_MAXLEN_SHIFT),
			       NIC_SRAM_TX_BUFFER_DESC);
		txrcb += TG3_BDINFO_SIZE;
	}

	if (tnapi->rx_rcb) {
		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       (tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
		rxrcb += TG3_BDINFO_SIZE;
	}

	stblk = HOSTCC_STATBLCK_RING1;

	/* Program status blocks and ring control blocks for the remaining
	 * interrupt vectors.
	 */
	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;
		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);

		/* Clear status block in ram. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		if (tnapi->tx_ring) {
			tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
				       (TG3_TX_RING_SIZE <<
					BDINFO_FLAGS_MAXLEN_SHIFT),
				       NIC_SRAM_TX_BUFFER_DESC);
			txrcb += TG3_BDINFO_SIZE;
		}

		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       ((tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT), 0);

		/* Status block address registers are 8 bytes apart. */
		stblk += 8;
		rxrcb += TG3_BDINFO_SIZE;
	}
}
8166
8167 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8168 {
8169         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8170
8171         if (!tg3_flag(tp, 5750_PLUS) ||
8172             tg3_flag(tp, 5780_CLASS) ||
8173             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8174             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8175             tg3_flag(tp, 57765_PLUS))
8176                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8177         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8178                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8179                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8180         else
8181                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8182
8183         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8184         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8185
8186         val = min(nic_rep_thresh, host_rep_thresh);
8187         tw32(RCVBDI_STD_THRESH, val);
8188
8189         if (tg3_flag(tp, 57765_PLUS))
8190                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8191
8192         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8193                 return;
8194
8195         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8196
8197         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8198
8199         val = min(bdcache_maxcnt / 2, host_rep_thresh);
8200         tw32(RCVBDI_JUMBO_THRESH, val);
8201
8202         if (tg3_flag(tp, 57765_PLUS))
8203                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8204 }
8205
8206 /* tp->lock is held. */
8207 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8208 {
8209         u32 val, rdmac_mode;
8210         int i, err, limit;
8211         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8212
8213         tg3_disable_ints(tp);
8214
8215         tg3_stop_fw(tp);
8216
8217         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8218
8219         if (tg3_flag(tp, INIT_COMPLETE))
8220                 tg3_abort_hw(tp, 1);
8221
8222         /* Enable MAC control of LPI */
8223         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8224                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8225                        TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8226                        TG3_CPMU_EEE_LNKIDL_UART_IDL);
8227
8228                 tw32_f(TG3_CPMU_EEE_CTRL,
8229                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8230
8231                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8232                       TG3_CPMU_EEEMD_LPI_IN_TX |
8233                       TG3_CPMU_EEEMD_LPI_IN_RX |
8234                       TG3_CPMU_EEEMD_EEE_ENABLE;
8235
8236                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8237                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8238
8239                 if (tg3_flag(tp, ENABLE_APE))
8240                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8241
8242                 tw32_f(TG3_CPMU_EEE_MODE, val);
8243
8244                 tw32_f(TG3_CPMU_EEE_DBTMR1,
8245                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8246                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8247
8248                 tw32_f(TG3_CPMU_EEE_DBTMR2,
8249                        TG3_CPMU_DBTMR2_APE_TX_2047US |
8250                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8251         }
8252
8253         if (reset_phy)
8254                 tg3_phy_reset(tp);
8255
8256         err = tg3_chip_reset(tp);
8257         if (err)
8258                 return err;
8259
8260         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8261
8262         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8263                 val = tr32(TG3_CPMU_CTRL);
8264                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8265                 tw32(TG3_CPMU_CTRL, val);
8266
8267                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8268                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8269                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8270                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8271
8272                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8273                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8274                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8275                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8276
8277                 val = tr32(TG3_CPMU_HST_ACC);
8278                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8279                 val |= CPMU_HST_ACC_MACCLK_6_25;
8280                 tw32(TG3_CPMU_HST_ACC, val);
8281         }
8282
8283         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8284                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8285                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8286                        PCIE_PWR_MGMT_L1_THRESH_4MS;
8287                 tw32(PCIE_PWR_MGMT_THRESH, val);
8288
8289                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8290                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8291
8292                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8293
8294                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8295                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8296         }
8297
8298         if (tg3_flag(tp, L1PLLPD_EN)) {
8299                 u32 grc_mode = tr32(GRC_MODE);
8300
8301                 /* Access the lower 1K of PL PCIE block registers. */
8302                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8303                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8304
8305                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8306                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8307                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8308
8309                 tw32(GRC_MODE, grc_mode);
8310         }
8311
8312         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
8313                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8314                         u32 grc_mode = tr32(GRC_MODE);
8315
8316                         /* Access the lower 1K of PL PCIE block registers. */
8317                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8318                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8319
8320                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8321                                    TG3_PCIE_PL_LO_PHYCTL5);
8322                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8323                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8324
8325                         tw32(GRC_MODE, grc_mode);
8326                 }
8327
8328                 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8329                         u32 grc_mode = tr32(GRC_MODE);
8330
8331                         /* Access the lower 1K of DL PCIE block registers. */
8332                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8333                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8334
8335                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8336                                    TG3_PCIE_DL_LO_FTSMAX);
8337                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8338                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8339                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8340
8341                         tw32(GRC_MODE, grc_mode);
8342                 }
8343
8344                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8345                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8346                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8347                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8348         }
8349
8350         /* This works around an issue with Athlon chipsets on
8351          * B3 tigon3 silicon.  This bit has no effect on any
8352          * other revision.  But do not set this on PCI Express
8353          * chips and don't even touch the clocks if the CPMU is present.
8354          */
8355         if (!tg3_flag(tp, CPMU_PRESENT)) {
8356                 if (!tg3_flag(tp, PCI_EXPRESS))
8357                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8358                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8359         }
8360
8361         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8362             tg3_flag(tp, PCIX_MODE)) {
8363                 val = tr32(TG3PCI_PCISTATE);
8364                 val |= PCISTATE_RETRY_SAME_DMA;
8365                 tw32(TG3PCI_PCISTATE, val);
8366         }
8367
8368         if (tg3_flag(tp, ENABLE_APE)) {
8369                 /* Allow reads and writes to the
8370                  * APE register and memory space.
8371                  */
8372                 val = tr32(TG3PCI_PCISTATE);
8373                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8374                        PCISTATE_ALLOW_APE_SHMEM_WR |
8375                        PCISTATE_ALLOW_APE_PSPACE_WR;
8376                 tw32(TG3PCI_PCISTATE, val);
8377         }
8378
8379         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8380                 /* Enable some hw fixes.  */
8381                 val = tr32(TG3PCI_MSI_DATA);
8382                 val |= (1 << 26) | (1 << 28) | (1 << 29);
8383                 tw32(TG3PCI_MSI_DATA, val);
8384         }
8385
8386         /* Descriptor ring init may make accesses to the
8387          * NIC SRAM area to setup the TX descriptors, so we
8388          * can only do this after the hardware has been
8389          * successfully reset.
8390          */
8391         err = tg3_init_rings(tp);
8392         if (err)
8393                 return err;
8394
8395         if (tg3_flag(tp, 57765_PLUS)) {
8396                 val = tr32(TG3PCI_DMA_RW_CTRL) &
8397                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8398                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8399                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8400                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8401                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8402                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
8403                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8404         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8405                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8406                 /* This value is determined during the probe time DMA
8407                  * engine test, tg3_test_dma.
8408                  */
8409                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8410         }
8411
8412         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8413                           GRC_MODE_4X_NIC_SEND_RINGS |
8414                           GRC_MODE_NO_TX_PHDR_CSUM |
8415                           GRC_MODE_NO_RX_PHDR_CSUM);
8416         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8417
8418         /* Pseudo-header checksum is done by hardware logic and not
8419          * the offload processers, so make the chip do the pseudo-
8420          * header checksums on receive.  For transmit it is more
8421          * convenient to do the pseudo-header checksum in software
8422          * as Linux does that on transmit for us in all cases.
8423          */
8424         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8425
8426         tw32(GRC_MODE,
8427              tp->grc_mode |
8428              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8429
8430         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
8431         val = tr32(GRC_MISC_CFG);
8432         val &= ~0xff;
8433         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8434         tw32(GRC_MISC_CFG, val);
8435
8436         /* Initialize MBUF/DESC pool. */
8437         if (tg3_flag(tp, 5750_PLUS)) {
8438                 /* Do nothing.  */
8439         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8440                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8441                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8442                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8443                 else
8444                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8445                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8446                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8447         } else if (tg3_flag(tp, TSO_CAPABLE)) {
8448                 int fw_len;
8449
8450                 fw_len = tp->fw_len;
8451                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8452                 tw32(BUFMGR_MB_POOL_ADDR,
8453                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8454                 tw32(BUFMGR_MB_POOL_SIZE,
8455                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8456         }
8457
8458         if (tp->dev->mtu <= ETH_DATA_LEN) {
8459                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8460                      tp->bufmgr_config.mbuf_read_dma_low_water);
8461                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8462                      tp->bufmgr_config.mbuf_mac_rx_low_water);
8463                 tw32(BUFMGR_MB_HIGH_WATER,
8464                      tp->bufmgr_config.mbuf_high_water);
8465         } else {
8466                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8467                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8468                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8469                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8470                 tw32(BUFMGR_MB_HIGH_WATER,
8471                      tp->bufmgr_config.mbuf_high_water_jumbo);
8472         }
8473         tw32(BUFMGR_DMA_LOW_WATER,
8474              tp->bufmgr_config.dma_low_water);
8475         tw32(BUFMGR_DMA_HIGH_WATER,
8476              tp->bufmgr_config.dma_high_water);
8477
8478         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8479         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8480                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8481         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8482             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8483             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8484                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8485         tw32(BUFMGR_MODE, val);
8486         for (i = 0; i < 2000; i++) {
8487                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8488                         break;
8489                 udelay(10);
8490         }
8491         if (i >= 2000) {
8492                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8493                 return -ENODEV;
8494         }
8495
8496         if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8497                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8498
8499         tg3_setup_rxbd_thresholds(tp);
8500
8501         /* Initialize TG3_BDINFO's at:
8502          *  RCVDBDI_STD_BD:     standard eth size rx ring
8503          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
8504          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
8505          *
8506          * like so:
8507          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
8508          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
8509          *                              ring attribute flags
8510          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
8511          *
8512          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8513          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8514          *
8515          * The size of each ring is fixed in the firmware, but the location is
8516          * configurable.
8517          */
8518         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8519              ((u64) tpr->rx_std_mapping >> 32));
8520         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8521              ((u64) tpr->rx_std_mapping & 0xffffffff));
8522         if (!tg3_flag(tp, 5717_PLUS))
8523                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8524                      NIC_SRAM_RX_BUFFER_DESC);
8525
8526         /* Disable the mini ring */
8527         if (!tg3_flag(tp, 5705_PLUS))
8528                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8529                      BDINFO_FLAGS_DISABLED);
8530
8531         /* Program the jumbo buffer descriptor ring control
8532          * blocks on those devices that have them.
8533          */
8534         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8535             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8536
8537                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8538                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8539                              ((u64) tpr->rx_jmb_mapping >> 32));
8540                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8541                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8542                         val = TG3_RX_JMB_RING_SIZE(tp) <<
8543                               BDINFO_FLAGS_MAXLEN_SHIFT;
8544                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8545                              val | BDINFO_FLAGS_USE_EXT_RECV);
8546                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8547                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8548                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8549                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8550                 } else {
8551                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8552                              BDINFO_FLAGS_DISABLED);
8553                 }
8554
8555                 if (tg3_flag(tp, 57765_PLUS)) {
8556                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8557                                 val = TG3_RX_STD_MAX_SIZE_5700;
8558                         else
8559                                 val = TG3_RX_STD_MAX_SIZE_5717;
8560                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8561                         val |= (TG3_RX_STD_DMA_SZ << 2);
8562                 } else
8563                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8564         } else
8565                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8566
8567         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8568
8569         tpr->rx_std_prod_idx = tp->rx_pending;
8570         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8571
8572         tpr->rx_jmb_prod_idx =
8573                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8574         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8575
8576         tg3_rings_reset(tp);
8577
8578         /* Initialize MAC address and backoff seed. */
8579         __tg3_set_mac_addr(tp, 0);
8580
8581         /* MTU + ethernet header + FCS + optional VLAN tag */
8582         tw32(MAC_RX_MTU_SIZE,
8583              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8584
8585         /* The slot time is changed by tg3_setup_phy if we
8586          * run at gigabit with half duplex.
8587          */
8588         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8589               (6 << TX_LENGTHS_IPG_SHIFT) |
8590               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8591
8592         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8593                 val |= tr32(MAC_TX_LENGTHS) &
8594                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
8595                         TX_LENGTHS_CNT_DWN_VAL_MSK);
8596
8597         tw32(MAC_TX_LENGTHS, val);
8598
8599         /* Receive rules. */
8600         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8601         tw32(RCVLPC_CONFIG, 0x0181);
8602
8603         /* Calculate RDMAC_MODE setting early, we need it to determine
8604          * the RCVLPC_STATE_ENABLE mask.
8605          */
8606         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8607                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8608                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8609                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8610                       RDMAC_MODE_LNGREAD_ENAB);
8611
8612         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8613                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8614
8615         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8616             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8617             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8618                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8619                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8620                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8621
8622         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8623             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8624                 if (tg3_flag(tp, TSO_CAPABLE) &&
8625                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8626                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8627                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8628                            !tg3_flag(tp, IS_5788)) {
8629                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8630                 }
8631         }
8632
8633         if (tg3_flag(tp, PCI_EXPRESS))
8634                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8635
8636         if (tg3_flag(tp, HW_TSO_1) ||
8637             tg3_flag(tp, HW_TSO_2) ||
8638             tg3_flag(tp, HW_TSO_3))
8639                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8640
8641         if (tg3_flag(tp, 57765_PLUS) ||
8642             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8643             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8644                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8645
8646         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8647                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8648
8649         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8650             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8651             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8652             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8653             tg3_flag(tp, 57765_PLUS)) {
8654                 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8655                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8656                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8657                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8658                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8659                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8660                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8661                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8662                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8663                 }
8664                 tw32(TG3_RDMA_RSRVCTRL_REG,
8665                      val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8666         }
8667
8668         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8669             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8670                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8671                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8672                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8673                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8674         }
8675
8676         /* Receive/send statistics. */
8677         if (tg3_flag(tp, 5750_PLUS)) {
8678                 val = tr32(RCVLPC_STATS_ENABLE);
8679                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8680                 tw32(RCVLPC_STATS_ENABLE, val);
8681         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8682                    tg3_flag(tp, TSO_CAPABLE)) {
8683                 val = tr32(RCVLPC_STATS_ENABLE);
8684                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8685                 tw32(RCVLPC_STATS_ENABLE, val);
8686         } else {
8687                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8688         }
8689         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8690         tw32(SNDDATAI_STATSENAB, 0xffffff);
8691         tw32(SNDDATAI_STATSCTRL,
8692              (SNDDATAI_SCTRL_ENABLE |
8693               SNDDATAI_SCTRL_FASTUPD));
8694
8695         /* Setup host coalescing engine. */
8696         tw32(HOSTCC_MODE, 0);
8697         for (i = 0; i < 2000; i++) {
8698                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8699                         break;
8700                 udelay(10);
8701         }
8702
8703         __tg3_set_coalesce(tp, &tp->coal);
8704
8705         if (!tg3_flag(tp, 5705_PLUS)) {
8706                 /* Status/statistics block address.  See tg3_timer,
8707                  * the tg3_periodic_fetch_stats call there, and
8708                  * tg3_get_stats to see how this works for 5705/5750 chips.
8709                  */
8710                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8711                      ((u64) tp->stats_mapping >> 32));
8712                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8713                      ((u64) tp->stats_mapping & 0xffffffff));
8714                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8715
8716                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8717
8718                 /* Clear statistics and status block memory areas */
8719                 for (i = NIC_SRAM_STATS_BLK;
8720                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8721                      i += sizeof(u32)) {
8722                         tg3_write_mem(tp, i, 0);
8723                         udelay(40);
8724                 }
8725         }
8726
8727         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8728
8729         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8730         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8731         if (!tg3_flag(tp, 5705_PLUS))
8732                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8733
8734         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8735                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8736                 /* reset to prevent losing 1st rx packet intermittently */
8737                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8738                 udelay(10);
8739         }
8740
8741         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8742                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
8743                         MAC_MODE_FHDE_ENABLE;
8744         if (tg3_flag(tp, ENABLE_APE))
8745                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8746         if (!tg3_flag(tp, 5705_PLUS) &&
8747             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8748             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8749                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8750         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8751         udelay(40);
8752
8753         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8754          * If TG3_FLAG_IS_NIC is zero, we should read the
8755          * register to preserve the GPIO settings for LOMs. The GPIOs,
8756          * whether used as inputs or outputs, are set by boot code after
8757          * reset.
8758          */
8759         if (!tg3_flag(tp, IS_NIC)) {
8760                 u32 gpio_mask;
8761
8762                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8763                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8764                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8765
8766                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8767                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8768                                      GRC_LCLCTRL_GPIO_OUTPUT3;
8769
8770                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8771                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8772
8773                 tp->grc_local_ctrl &= ~gpio_mask;
8774                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8775
8776                 /* GPIO1 must be driven high for eeprom write protect */
8777                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
8778                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8779                                                GRC_LCLCTRL_GPIO_OUTPUT1);
8780         }
8781         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8782         udelay(100);
8783
8784         if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
8785                 val = tr32(MSGINT_MODE);
8786                 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8787                 if (!tg3_flag(tp, 1SHOT_MSI))
8788                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
8789                 tw32(MSGINT_MODE, val);
8790         }
8791
8792         if (!tg3_flag(tp, 5705_PLUS)) {
8793                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8794                 udelay(40);
8795         }
8796
8797         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8798                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8799                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8800                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8801                WDMAC_MODE_LNGREAD_ENAB);
8802
8803         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8804             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8805                 if (tg3_flag(tp, TSO_CAPABLE) &&
8806                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8807                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8808                         /* nothing */
8809                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8810                            !tg3_flag(tp, IS_5788)) {
8811                         val |= WDMAC_MODE_RX_ACCEL;
8812                 }
8813         }
8814
8815         /* Enable host coalescing bug fix */
8816         if (tg3_flag(tp, 5755_PLUS))
8817                 val |= WDMAC_MODE_STATUS_TAG_FIX;
8818
8819         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8820                 val |= WDMAC_MODE_BURST_ALL_DATA;
8821
8822         tw32_f(WDMAC_MODE, val);
8823         udelay(40);
8824
8825         if (tg3_flag(tp, PCIX_MODE)) {
8826                 u16 pcix_cmd;
8827
8828                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8829                                      &pcix_cmd);
8830                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8831                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8832                         pcix_cmd |= PCI_X_CMD_READ_2K;
8833                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8834                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8835                         pcix_cmd |= PCI_X_CMD_READ_2K;
8836                 }
8837                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8838                                       pcix_cmd);
8839         }
8840
8841         tw32_f(RDMAC_MODE, rdmac_mode);
8842         udelay(40);
8843
8844         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8845         if (!tg3_flag(tp, 5705_PLUS))
8846                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8847
8848         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8849                 tw32(SNDDATAC_MODE,
8850                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8851         else
8852                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8853
8854         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8855         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8856         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8857         if (tg3_flag(tp, LRG_PROD_RING_CAP))
8858                 val |= RCVDBDI_MODE_LRG_RING_SZ;
8859         tw32(RCVDBDI_MODE, val);
8860         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8861         if (tg3_flag(tp, HW_TSO_1) ||
8862             tg3_flag(tp, HW_TSO_2) ||
8863             tg3_flag(tp, HW_TSO_3))
8864                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8865         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8866         if (tg3_flag(tp, ENABLE_TSS))
8867                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8868         tw32(SNDBDI_MODE, val);
8869         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8870
8871         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8872                 err = tg3_load_5701_a0_firmware_fix(tp);
8873                 if (err)
8874                         return err;
8875         }
8876
8877         if (tg3_flag(tp, TSO_CAPABLE)) {
8878                 err = tg3_load_tso_firmware(tp);
8879                 if (err)
8880                         return err;
8881         }
8882
8883         tp->tx_mode = TX_MODE_ENABLE;
8884
8885         if (tg3_flag(tp, 5755_PLUS) ||
8886             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8887                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8888
8889         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8890                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8891                 tp->tx_mode &= ~val;
8892                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8893         }
8894
8895         tw32_f(MAC_TX_MODE, tp->tx_mode);
8896         udelay(100);
8897
8898         if (tg3_flag(tp, ENABLE_RSS)) {
8899                 int i = 0;
8900                 u32 reg = MAC_RSS_INDIR_TBL_0;
8901
8902                 if (tp->irq_cnt == 2) {
8903                         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i += 8) {
8904                                 tw32(reg, 0x0);
8905                                 reg += 4;
8906                         }
8907                 } else {
8908                         u32 val;
8909
8910                         while (i < TG3_RSS_INDIR_TBL_SIZE) {
8911                                 val = i % (tp->irq_cnt - 1);
8912                                 i++;
8913                                 for (; i % 8; i++) {
8914                                         val <<= 4;
8915                                         val |= (i % (tp->irq_cnt - 1));
8916                                 }
8917                                 tw32(reg, val);
8918                                 reg += 4;
8919                         }
8920                 }
8921
8922                 /* Setup the "secret" hash key. */
8923                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8924                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8925                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8926                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8927                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8928                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8929                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8930                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8931                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8932                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8933         }
8934
8935         tp->rx_mode = RX_MODE_ENABLE;
8936         if (tg3_flag(tp, 5755_PLUS))
8937                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8938
8939         if (tg3_flag(tp, ENABLE_RSS))
8940                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8941                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
8942                                RX_MODE_RSS_IPV6_HASH_EN |
8943                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
8944                                RX_MODE_RSS_IPV4_HASH_EN |
8945                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
8946
8947         tw32_f(MAC_RX_MODE, tp->rx_mode);
8948         udelay(10);
8949
8950         tw32(MAC_LED_CTRL, tp->led_ctrl);
8951
8952         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8953         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8954                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8955                 udelay(10);
8956         }
8957         tw32_f(MAC_RX_MODE, tp->rx_mode);
8958         udelay(10);
8959
8960         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8961                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8962                         !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8963                         /* Set drive transmission level to 1.2V  */
8964                         /* only if the signal pre-emphasis bit is not set  */
8965                         val = tr32(MAC_SERDES_CFG);
8966                         val &= 0xfffff000;
8967                         val |= 0x880;
8968                         tw32(MAC_SERDES_CFG, val);
8969                 }
8970                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8971                         tw32(MAC_SERDES_CFG, 0x616000);
8972         }
8973
8974         /* Prevent chip from dropping frames when flow control
8975          * is enabled.
8976          */
8977         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8978                 val = 1;
8979         else
8980                 val = 2;
8981         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
8982
8983         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8984             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
8985                 /* Use hardware link auto-negotiation */
8986                 tg3_flag_set(tp, HW_AUTONEG);
8987         }
8988
8989         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8990             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
8991                 u32 tmp;
8992
8993                 tmp = tr32(SERDES_RX_CTRL);
8994                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
8995                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
8996                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
8997                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8998         }
8999
9000         if (!tg3_flag(tp, USE_PHYLIB)) {
9001                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
9002                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9003                         tp->link_config.speed = tp->link_config.orig_speed;
9004                         tp->link_config.duplex = tp->link_config.orig_duplex;
9005                         tp->link_config.autoneg = tp->link_config.orig_autoneg;
9006                 }
9007
9008                 err = tg3_setup_phy(tp, 0);
9009                 if (err)
9010                         return err;
9011
9012                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9013                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9014                         u32 tmp;
9015
9016                         /* Clear CRC stats. */
9017                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9018                                 tg3_writephy(tp, MII_TG3_TEST1,
9019                                              tmp | MII_TG3_TEST1_CRC_EN);
9020                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9021                         }
9022                 }
9023         }
9024
9025         __tg3_set_rx_mode(tp->dev);
9026
9027         /* Initialize receive rules. */
9028         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
9029         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9030         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
9031         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9032
9033         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9034                 limit = 8;
9035         else
9036                 limit = 16;
9037         if (tg3_flag(tp, ENABLE_ASF))
9038                 limit -= 4;
9039         switch (limit) {
9040         case 16:
9041                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
9042         case 15:
9043                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
9044         case 14:
9045                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
9046         case 13:
9047                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
9048         case 12:
9049                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
9050         case 11:
9051                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
9052         case 10:
9053                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
9054         case 9:
9055                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
9056         case 8:
9057                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
9058         case 7:
9059                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
9060         case 6:
9061                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
9062         case 5:
9063                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
9064         case 4:
9065                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
9066         case 3:
9067                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
9068         case 2:
9069         case 1:
9070
9071         default:
9072                 break;
9073         }
9074
9075         if (tg3_flag(tp, ENABLE_APE))
9076                 /* Write our heartbeat update interval to APE. */
9077                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9078                                 APE_HOST_HEARTBEAT_INT_DISABLE);
9079
9080         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9081
9082         return 0;
9083 }
9084
9085 /* Called at device open time to get the chip ready for
9086  * packet processing.  Invoked with tp->lock held.
9087  */
9088 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9089 {
9090         tg3_switch_clocks(tp);
9091
9092         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9093
9094         return tg3_reset_hw(tp, reset_phy);
9095 }
9096
/* Accumulate the 32-bit hardware statistics register REG (read via
 * tr32()) into the 64-bit counter PSTAT, which is kept as a pair of
 * u32 halves (->low / ->high).  If the unsigned addition to ->low
 * wraps — detected by the sum being smaller than the value just
 * added — carry one into ->high.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
9103
/* Fold the chip's 32-bit MAC / RCVLPC statistics registers into the
 * 64-bit software counters in tp->hw_stats (see TG3_STAT_ADD32 for
 * the carry handling).  Called periodically from tg3_timer; does
 * nothing while the link is down.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!netif_carrier_ok(tp->dev))
		return;

	/* Transmit-side MAC statistics. */
	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

	/* Receive-side MAC statistics. */
	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	} else {
		/* NOTE(review): on 5717 / 5719-A0 / 5720-A0 the
		 * RCVLPC discard counter is not read; the mbuf
		 * low-watermark attention bit is used as a proxy,
		 * counting at most one discard event per timer tick —
		 * confirm against the chip errata.
		 */
		u32 val = tr32(HOSTCC_FLOW_ATTN);
		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
		if (val) {
			/* Ack the attention bit before accumulating. */
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			/* Open-coded TG3_STAT_ADD32 carry handling. */
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}
		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
	}
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
9158
9159 static void tg3_chk_missed_msi(struct tg3 *tp)
9160 {
9161         u32 i;
9162
9163         for (i = 0; i < tp->irq_cnt; i++) {
9164                 struct tg3_napi *tnapi = &tp->napi[i];
9165
9166                 if (tg3_has_work(tnapi)) {
9167                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9168                             tnapi->last_tx_cons == tnapi->tx_cons) {
9169                                 if (tnapi->chk_msi_cnt < 1) {
9170                                         tnapi->chk_msi_cnt++;
9171                                         return;
9172                                 }
9173                                 tg3_msi(0, tnapi);
9174                         }
9175                 }
9176                 tnapi->chk_msi_cnt = 0;
9177                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9178                 tnapi->last_tx_cons = tnapi->tx_cons;
9179         }
9180 }
9181
/* Periodic driver timer, rearmed every tp->timer_offset jiffies.
 * Takes tp->lock for the whole body (the rearm happens outside it).
 * Duties: recover possibly-missed MSIs, work around the non-tagged
 * status-block IRQ race, run the once-per-second stats fetch and
 * link-state checks, and send the ASF firmware heartbeat.
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	/* Skip the body (but keep the timer running) while an irq sync
	 * or a pending reset task has the hardware in flux.
	 */
	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
		goto restart_timer;

	spin_lock(&tp->lock);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		tg3_chk_missed_msi(tp);

	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
			/* Status updated but not consumed: re-raise the
			 * interrupt so it gets serviced.
			 */
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			/* Force an immediate status block refresh. */
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}

		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			/* Write DMA engine dropped its enable bit —
			 * schedule a full chip reset.
			 */
			spin_unlock(&tp->lock);
			tg3_reset_task_schedule(tp);
			goto restart_timer;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tg3_flag(tp, 5705_PLUS))
			tg3_periodic_fetch_stats(tp);

		if (tp->setlpicnt && !--tp->setlpicnt)
			tg3_phy_eee_enable(tp);

		if (tg3_flag(tp, USE_LINKCHG_REG)) {
			/* Poll MAC_STATUS for link/PHY change events. */
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tg3_flag(tp, POLL_SERDES)) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			/* Link was up and the link state changed... */
			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			/* ...or link was down and a signal appeared. */
			if (!netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					/* Bounce the MAC port mode before
					 * renegotiating.
					 */
					tw32_f(MAC_MODE,
					     (tp->mac_mode &
					      ~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
			   tg3_flag(tp, 5780_CLASS)) {
			tg3_serdes_parallel_detect(tp);
		}

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
				      TG3_FW_UPDATE_TIMEOUT_SEC);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
9308
9309 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9310 {
9311         irq_handler_t fn;
9312         unsigned long flags;
9313         char *name;
9314         struct tg3_napi *tnapi = &tp->napi[irq_num];
9315
9316         if (tp->irq_cnt == 1)
9317                 name = tp->dev->name;
9318         else {
9319                 name = &tnapi->irq_lbl[0];
9320                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9321                 name[IFNAMSIZ-1] = 0;
9322         }
9323
9324         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9325                 fn = tg3_msi;
9326                 if (tg3_flag(tp, 1SHOT_MSI))
9327                         fn = tg3_msi_1shot;
9328                 flags = 0;
9329         } else {
9330                 fn = tg3_interrupt;
9331                 if (tg3_flag(tp, TAGGED_STATUS))
9332                         fn = tg3_interrupt_tagged;
9333                 flags = IRQF_SHARED;
9334         }
9335
9336         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9337 }
9338
/* Verify that the device can actually deliver an interrupt on vector 0.
 *
 * The vector's normal handler is temporarily replaced with tg3_test_isr,
 * interrupts are enabled, and the host coalescing engine is kicked
 * (coal_now) to force a status block update.  We then poll for up to
 * ~50ms for either a non-zero interrupt mailbox or the PCI-int-masked
 * bit in MISC_HOST_CTRL, either of which shows the interrupt fired.
 * The normal handler is re-registered before returning.
 *
 * Returns 0 if an interrupt was observed, -ENODEV if the device is not
 * running, -EIO if no interrupt was seen, or a request_irq() error.
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
        struct tg3_napi *tnapi = &tp->napi[0];
        struct net_device *dev = tp->dev;
        int err, i, intr_ok = 0;
        u32 val;

        if (!netif_running(dev))
                return -ENODEV;

        tg3_disable_ints(tp);

        free_irq(tnapi->irq_vec, tnapi);

        /*
         * Turn off MSI one shot mode.  Otherwise this test has no
         * observable way to know whether the interrupt was delivered.
         */
        if (tg3_flag(tp, 57765_PLUS)) {
                val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
                tw32(MSGINT_MODE, val);
        }

        err = request_irq(tnapi->irq_vec, tg3_test_isr,
                          IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
        if (err)
                /* NOTE(review): on this path no handler is left registered
                 * for the vector; callers treat the error as fatal and keep
                 * the device down.
                 */
                return err;

        tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
        tg3_enable_ints(tp);

        /* Kick the coalescing engine so a status block update (and hence
         * an interrupt) is generated immediately.
         */
        tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
               tnapi->coal_now);

        for (i = 0; i < 5; i++) {
                u32 int_mbox, misc_host_ctrl;

                int_mbox = tr32_mailbox(tnapi->int_mbox);
                misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

                if ((int_mbox != 0) ||
                    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
                        intr_ok = 1;
                        break;
                }

                /* NOTE(review): presumably this acks the pending status tag
                 * so a further interrupt can be raised while one-shot mode
                 * is disabled on 57765+ parts — confirm against the
                 * interrupt handlers.
                 */
                if (tg3_flag(tp, 57765_PLUS) &&
                    tnapi->hw_status->status_tag != tnapi->last_tag)
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                msleep(10);
        }

        tg3_disable_ints(tp);

        free_irq(tnapi->irq_vec, tnapi);

        /* Restore the normal handler for vector 0. */
        err = tg3_request_irq(tp, 0);

        if (err)
                return err;

        if (intr_ok) {
                /* Reenable MSI one shot mode. */
                if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
                        val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
                        tw32(MSGINT_MODE, val);
                }
                return 0;
        }

        return -EIO;
}
9412
/* Returns 0 if MSI test succeeds, or if MSI test fails and INTx mode is
 * successfully restored.
 *
 * SERR reporting is suppressed around the test because a failing MSI
 * cycle may terminate with Master Abort.  If tg3_test_interrupt()
 * reports -EIO, MSI is abandoned entirely: the MSI vector is freed,
 * pci_disable_msi() is called, the legacy INTx vector is installed,
 * and the chip is halted and re-initialized in case the aborted MSI
 * cycle left it in a bad state.
 */
static int tg3_test_msi(struct tg3 *tp)
{
        int err;
        u16 pci_cmd;

        if (!tg3_flag(tp, USING_MSI))
                return 0;

        /* Turn off SERR reporting in case MSI terminates with Master
         * Abort.
         */
        pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
        pci_write_config_word(tp->pdev, PCI_COMMAND,
                              pci_cmd & ~PCI_COMMAND_SERR);

        err = tg3_test_interrupt(tp);

        /* Restore the original PCI command word (re-enables SERR). */
        pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

        if (!err)
                return 0;

        /* other failures */
        if (err != -EIO)
                return err;

        /* MSI test failed, go back to INTx mode */
        netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
                    "to INTx mode. Please report this failure to the PCI "
                    "maintainer and include system chipset information\n");

        free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

        pci_disable_msi(tp->pdev);

        tg3_flag_clear(tp, USING_MSI);
        tp->napi[0].irq_vec = tp->pdev->irq;

        err = tg3_request_irq(tp, 0);
        if (err)
                return err;

        /* Need to reset the chip because the MSI cycle may have terminated
         * with Master Abort.
         */
        tg3_full_lock(tp, 1);

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        err = tg3_init_hw(tp, 1);

        tg3_full_unlock(tp);

        if (err)
                free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

        return err;
}
9473
9474 static int tg3_request_firmware(struct tg3 *tp)
9475 {
9476         const __be32 *fw_data;
9477
9478         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9479                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9480                            tp->fw_needed);
9481                 return -ENOENT;
9482         }
9483
9484         fw_data = (void *)tp->fw->data;
9485
9486         /* Firmware blob starts with version numbers, followed by
9487          * start address and _full_ length including BSS sections
9488          * (which must be longer than the actual data, of course
9489          */
9490
9491         tp->fw_len = be32_to_cpu(fw_data[2]);   /* includes bss */
9492         if (tp->fw_len < (tp->fw->size - 12)) {
9493                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9494                            tp->fw_len, tp->fw_needed);
9495                 release_firmware(tp->fw);
9496                 tp->fw = NULL;
9497                 return -EINVAL;
9498         }
9499
9500         /* We no longer need firmware; we have it. */
9501         tp->fw_needed = NULL;
9502         return 0;
9503 }
9504
/* Try to put the device into MSI-X mode.
 *
 * Requests one vector per online CPU plus one (vector 0 handles link
 * and other non-rx events), capped at tp->irq_max.  Returns true if
 * MSI-X was enabled and the rx/tx queue counts were set; false means
 * the caller should fall back to plain MSI or INTx.
 */
static bool tg3_enable_msix(struct tg3 *tp)
{
        int i, rc, cpus = num_online_cpus();
        /* NOTE(review): variable-length array on the kernel stack;
         * irq_max is small, but a fixed-size array would be safer.
         */
        struct msix_entry msix_ent[tp->irq_max];

        if (cpus == 1)
                /* Just fallback to the simpler MSI mode. */
                return false;

        /*
         * We want as many rx rings enabled as there are cpus.
         * The first MSIX vector only deals with link interrupts, etc,
         * so we add one to the number of vectors we are requesting.
         */
        tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);

        for (i = 0; i < tp->irq_max; i++) {
                msix_ent[i].entry  = i;
                msix_ent[i].vector = 0;
        }

        rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
        if (rc < 0) {
                return false;
        } else if (rc != 0) {
                /* Positive return = fewer vectors available than asked
                 * for; retry with the number the PCI core offered.
                 */
                if (pci_enable_msix(tp->pdev, msix_ent, rc))
                        return false;
                netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
                              tp->irq_cnt, rc);
                tp->irq_cnt = rc;
        }

        /* Entries past irq_cnt keep the 0 vector they were seeded with. */
        for (i = 0; i < tp->irq_max; i++)
                tp->napi[i].irq_vec = msix_ent[i].vector;

        netif_set_real_num_tx_queues(tp->dev, 1);
        rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
        if (netif_set_real_num_rx_queues(tp->dev, rc)) {
                pci_disable_msix(tp->pdev);
                return false;
        }

        if (tp->irq_cnt > 1) {
                tg3_flag_set(tp, ENABLE_RSS);

                /* 5719/5720 additionally support multiple tx queues. */
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
                        tg3_flag_set(tp, ENABLE_TSS);
                        netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
                }
        }

        return true;
}
9559
/* Choose the interrupt mode (MSI-X > MSI > INTx) and program MSGINT_MODE.
 *
 * Chips claiming MSI support without tagged status are not trusted with
 * MSI and are dropped straight to the legacy single-vector setup.
 */
static void tg3_ints_init(struct tg3 *tp)
{
        if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
            !tg3_flag(tp, TAGGED_STATUS)) {
                /* All MSI supporting chips should support tagged
                 * status.  Assert that this is the case.
                 */
                netdev_warn(tp->dev,
                            "MSI without TAGGED_STATUS? Not using MSI\n");
                goto defcfg;
        }

        if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
                tg3_flag_set(tp, USING_MSIX);
        else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
                tg3_flag_set(tp, USING_MSI);

        if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
                u32 msi_mode = tr32(MSGINT_MODE);
                if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
                        msi_mode |= MSGINT_MODE_MULTIVEC_EN;
                if (!tg3_flag(tp, 1SHOT_MSI))
                        msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
                tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
        }
defcfg:
        /* Legacy INTx or plain MSI: a single vector, one tx and one rx
         * queue.
         */
        if (!tg3_flag(tp, USING_MSIX)) {
                tp->irq_cnt = 1;
                tp->napi[0].irq_vec = tp->pdev->irq;
                netif_set_real_num_tx_queues(tp->dev, 1);
                netif_set_real_num_rx_queues(tp->dev, 1);
        }
}
9593
9594 static void tg3_ints_fini(struct tg3 *tp)
9595 {
9596         if (tg3_flag(tp, USING_MSIX))
9597                 pci_disable_msix(tp->pdev);
9598         else if (tg3_flag(tp, USING_MSI))
9599                 pci_disable_msi(tp->pdev);
9600         tg3_flag_clear(tp, USING_MSI);
9601         tg3_flag_clear(tp, USING_MSIX);
9602         tg3_flag_clear(tp, ENABLE_RSS);
9603         tg3_flag_clear(tp, ENABLE_TSS);
9604 }
9605
/* ndo_open hook: bring the device fully up.
 *
 * Sequence: optional firmware load, power-up, interrupt mode selection,
 * DMA/ring allocation, NAPI setup, per-vector IRQ registration, hardware
 * init, MSI sanity test, then the poll timer and interrupts are enabled
 * and the tx queues started.  Each failure unwinds everything done so
 * far via the err_out labels.  Returns 0 or a negative errno.
 */
static int tg3_open(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        int i, err;

        if (tp->fw_needed) {
                err = tg3_request_firmware(tp);
                if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
                        /* 5701 A0 cannot run without its firmware. */
                        if (err)
                                return err;
                } else if (err) {
                        /* TSO firmware is optional; run without TSO. */
                        netdev_warn(tp->dev, "TSO capability disabled\n");
                        tg3_flag_clear(tp, TSO_CAPABLE);
                } else if (!tg3_flag(tp, TSO_CAPABLE)) {
                        netdev_notice(tp->dev, "TSO capability restored\n");
                        tg3_flag_set(tp, TSO_CAPABLE);
                }
        }

        netif_carrier_off(tp->dev);

        err = tg3_power_up(tp);
        if (err)
                return err;

        tg3_full_lock(tp, 0);

        tg3_disable_ints(tp);
        tg3_flag_clear(tp, INIT_COMPLETE);

        tg3_full_unlock(tp);

        /*
         * Setup interrupts first so we know how
         * many NAPI resources to allocate
         */
        tg3_ints_init(tp);

        /* The placement of this call is tied
         * to the setup and use of Host TX descriptors.
         */
        err = tg3_alloc_consistent(tp);
        if (err)
                goto err_out1;

        tg3_napi_init(tp);

        tg3_napi_enable(tp);

        /* Register one handler per vector; unwind the ones already
         * registered if any request fails.
         */
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];
                err = tg3_request_irq(tp, i);
                if (err) {
                        for (i--; i >= 0; i--) {
                                tnapi = &tp->napi[i];
                                free_irq(tnapi->irq_vec, tnapi);
                        }
                        goto err_out2;
                }
        }

        tg3_full_lock(tp, 0);

        err = tg3_init_hw(tp, 1);
        if (err) {
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                tg3_free_rings(tp);
        } else {
                /* Tagged-status chips (except 5717/57765) only need the
                 * poll timer at 1Hz; everything else runs it at 10Hz.
                 */
                if (tg3_flag(tp, TAGGED_STATUS) &&
                        GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
                        GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765)
                        tp->timer_offset = HZ;
                else
                        tp->timer_offset = HZ / 10;

                BUG_ON(tp->timer_offset > HZ);
                tp->timer_counter = tp->timer_multiplier =
                        (HZ / tp->timer_offset);
                /* ASF heartbeat fires at half the timer rate (every 2s). */
                tp->asf_counter = tp->asf_multiplier =
                        ((HZ / tp->timer_offset) * 2);

                init_timer(&tp->timer);
                tp->timer.expires = jiffies + tp->timer_offset;
                tp->timer.data = (unsigned long) tp;
                tp->timer.function = tg3_timer;
        }

        tg3_full_unlock(tp);

        if (err)
                goto err_out3;

        if (tg3_flag(tp, USING_MSI)) {
                /* Verify MSI delivery actually works; on failure this
                 * falls back to INTx internally or errors out.
                 */
                err = tg3_test_msi(tp);

                if (err) {
                        tg3_full_lock(tp, 0);
                        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                        tg3_free_rings(tp);
                        tg3_full_unlock(tp);

                        goto err_out2;
                }

                if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
                        u32 val = tr32(PCIE_TRANSACTION_CFG);

                        tw32(PCIE_TRANSACTION_CFG,
                             val | PCIE_TRANS_CFG_1SHOT_MSI);
                }
        }

        tg3_phy_start(tp);

        tg3_full_lock(tp, 0);

        add_timer(&tp->timer);
        tg3_flag_set(tp, INIT_COMPLETE);
        tg3_enable_ints(tp);

        tg3_full_unlock(tp);

        netif_tx_start_all_queues(dev);

        /*
         * Reset loopback feature if it was turned on while the device was down
         * make sure that it's installed properly now.
         */
        if (dev->features & NETIF_F_LOOPBACK)
                tg3_set_loopback(dev, dev->features);

        return 0;

err_out3:
        for (i = tp->irq_cnt - 1; i >= 0; i--) {
                struct tg3_napi *tnapi = &tp->napi[i];
                free_irq(tnapi->irq_vec, tnapi);
        }

err_out2:
        tg3_napi_disable(tp);
        tg3_napi_fini(tp);
        tg3_free_consistent(tp);

err_out1:
        tg3_ints_fini(tp);
        tg3_frob_aux_power(tp, false);
        pci_set_power_state(tp->pdev, PCI_D3hot);
        return err;
}
9756
9757 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9758                                                  struct rtnl_link_stats64 *);
9759 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
9760
/* ndo_close hook: tear down everything tg3_open() set up, roughly in
 * reverse order, snapshotting the hardware counters into the *_prev
 * fields before the stats block is freed so totals survive the close.
 */
static int tg3_close(struct net_device *dev)
{
        int i;
        struct tg3 *tp = netdev_priv(dev);

        tg3_napi_disable(tp);
        tg3_reset_task_cancel(tp);

        netif_tx_stop_all_queues(dev);

        del_timer_sync(&tp->timer);

        tg3_phy_stop(tp);

        tg3_full_lock(tp, 1);

        tg3_disable_ints(tp);

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        tg3_free_rings(tp);
        tg3_flag_clear(tp, INIT_COMPLETE);

        tg3_full_unlock(tp);

        for (i = tp->irq_cnt - 1; i >= 0; i--) {
                struct tg3_napi *tnapi = &tp->napi[i];
                free_irq(tnapi->irq_vec, tnapi);
        }

        tg3_ints_fini(tp);

        /* Snapshot the counters before tg3_free_consistent() drops the
         * hardware stats block.
         */
        tg3_get_stats64(tp->dev, &tp->net_stats_prev);

        memcpy(&tp->estats_prev, tg3_get_estats(tp),
               sizeof(tp->estats_prev));

        tg3_napi_fini(tp);

        tg3_free_consistent(tp);

        tg3_power_down(tp);

        netif_carrier_off(tp->dev);

        return 0;
}
9807
9808 static inline u64 get_stat64(tg3_stat64_t *val)
9809 {
9810        return ((u64)val->high << 32) | ((u64)val->low);
9811 }
9812
/* Return the cumulative rx CRC error count.
 *
 * On 5700/5701 copper (non-SerDes) devices the count comes from the
 * PHY: the CRC counter is enabled via MII_TG3_TEST1 and read from
 * MII_TG3_RXR_COUNTERS under the device lock, then accumulated in
 * tp->phy_crc_errors (the counter presumably clears on read, hence the
 * accumulation — confirm against the PHY datasheet).  All other
 * devices use the MAC's rx_fcs_errors hardware statistic.
 */
static u64 calc_crc_errors(struct tg3 *tp)
{
        struct tg3_hw_stats *hw_stats = tp->hw_stats;

        if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
                u32 val;

                spin_lock_bh(&tp->lock);
                if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
                        tg3_writephy(tp, MII_TG3_TEST1,
                                     val | MII_TG3_TEST1_CRC_EN);
                        tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
                } else
                        val = 0;
                spin_unlock_bh(&tp->lock);

                tp->phy_crc_errors += val;

                return tp->phy_crc_errors;
        }

        return get_stat64(&hw_stats->rx_fcs_errors);
}
9838
/* Add the live hardware counter for @member to the total accumulated
 * across resets (tp->estats_prev), storing the sum in tp->estats.
 */
#define ESTAT_ADD(member) \
        estats->member =        old_estats->member + \
                                get_stat64(&hw_stats->member)

/* Refresh and return tp->estats from the hardware statistics block.
 * If the stats block is not mapped (device down), the previous
 * snapshot is returned unchanged.
 */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
        struct tg3_ethtool_stats *estats = &tp->estats;
        struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
        struct tg3_hw_stats *hw_stats = tp->hw_stats;

        if (!hw_stats)
                return old_estats;

        /* Receive-side counters. */
        ESTAT_ADD(rx_octets);
        ESTAT_ADD(rx_fragments);
        ESTAT_ADD(rx_ucast_packets);
        ESTAT_ADD(rx_mcast_packets);
        ESTAT_ADD(rx_bcast_packets);
        ESTAT_ADD(rx_fcs_errors);
        ESTAT_ADD(rx_align_errors);
        ESTAT_ADD(rx_xon_pause_rcvd);
        ESTAT_ADD(rx_xoff_pause_rcvd);
        ESTAT_ADD(rx_mac_ctrl_rcvd);
        ESTAT_ADD(rx_xoff_entered);
        ESTAT_ADD(rx_frame_too_long_errors);
        ESTAT_ADD(rx_jabbers);
        ESTAT_ADD(rx_undersize_packets);
        ESTAT_ADD(rx_in_length_errors);
        ESTAT_ADD(rx_out_length_errors);
        ESTAT_ADD(rx_64_or_less_octet_packets);
        ESTAT_ADD(rx_65_to_127_octet_packets);
        ESTAT_ADD(rx_128_to_255_octet_packets);
        ESTAT_ADD(rx_256_to_511_octet_packets);
        ESTAT_ADD(rx_512_to_1023_octet_packets);
        ESTAT_ADD(rx_1024_to_1522_octet_packets);
        ESTAT_ADD(rx_1523_to_2047_octet_packets);
        ESTAT_ADD(rx_2048_to_4095_octet_packets);
        ESTAT_ADD(rx_4096_to_8191_octet_packets);
        ESTAT_ADD(rx_8192_to_9022_octet_packets);

        /* Transmit-side counters. */
        ESTAT_ADD(tx_octets);
        ESTAT_ADD(tx_collisions);
        ESTAT_ADD(tx_xon_sent);
        ESTAT_ADD(tx_xoff_sent);
        ESTAT_ADD(tx_flow_control);
        ESTAT_ADD(tx_mac_errors);
        ESTAT_ADD(tx_single_collisions);
        ESTAT_ADD(tx_mult_collisions);
        ESTAT_ADD(tx_deferred);
        ESTAT_ADD(tx_excessive_collisions);
        ESTAT_ADD(tx_late_collisions);
        ESTAT_ADD(tx_collide_2times);
        ESTAT_ADD(tx_collide_3times);
        ESTAT_ADD(tx_collide_4times);
        ESTAT_ADD(tx_collide_5times);
        ESTAT_ADD(tx_collide_6times);
        ESTAT_ADD(tx_collide_7times);
        ESTAT_ADD(tx_collide_8times);
        ESTAT_ADD(tx_collide_9times);
        ESTAT_ADD(tx_collide_10times);
        ESTAT_ADD(tx_collide_11times);
        ESTAT_ADD(tx_collide_12times);
        ESTAT_ADD(tx_collide_13times);
        ESTAT_ADD(tx_collide_14times);
        ESTAT_ADD(tx_collide_15times);
        ESTAT_ADD(tx_ucast_packets);
        ESTAT_ADD(tx_mcast_packets);
        ESTAT_ADD(tx_bcast_packets);
        ESTAT_ADD(tx_carrier_sense_errors);
        ESTAT_ADD(tx_discards);
        ESTAT_ADD(tx_errors);

        /* Internal DMA/queue counters. */
        ESTAT_ADD(dma_writeq_full);
        ESTAT_ADD(dma_write_prioq_full);
        ESTAT_ADD(rxbds_empty);
        ESTAT_ADD(rx_discards);
        ESTAT_ADD(rx_errors);
        ESTAT_ADD(rx_threshold_hit);

        ESTAT_ADD(dma_readq_full);
        ESTAT_ADD(dma_read_prioq_full);
        ESTAT_ADD(tx_comp_queue_full);

        ESTAT_ADD(ring_set_send_prod_index);
        ESTAT_ADD(ring_status_update);
        ESTAT_ADD(nic_irqs);
        ESTAT_ADD(nic_avoided_irqs);
        ESTAT_ADD(nic_tx_threshold_hit);

        ESTAT_ADD(mbuf_lwm_thresh_hit);

        return estats;
}
9932
9933 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9934                                                  struct rtnl_link_stats64 *stats)
9935 {
9936         struct tg3 *tp = netdev_priv(dev);
9937         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
9938         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9939
9940         if (!hw_stats)
9941                 return old_stats;
9942
9943         stats->rx_packets = old_stats->rx_packets +
9944                 get_stat64(&hw_stats->rx_ucast_packets) +
9945                 get_stat64(&hw_stats->rx_mcast_packets) +
9946                 get_stat64(&hw_stats->rx_bcast_packets);
9947
9948         stats->tx_packets = old_stats->tx_packets +
9949                 get_stat64(&hw_stats->tx_ucast_packets) +
9950                 get_stat64(&hw_stats->tx_mcast_packets) +
9951                 get_stat64(&hw_stats->tx_bcast_packets);
9952
9953         stats->rx_bytes = old_stats->rx_bytes +
9954                 get_stat64(&hw_stats->rx_octets);
9955         stats->tx_bytes = old_stats->tx_bytes +
9956                 get_stat64(&hw_stats->tx_octets);
9957
9958         stats->rx_errors = old_stats->rx_errors +
9959                 get_stat64(&hw_stats->rx_errors);
9960         stats->tx_errors = old_stats->tx_errors +
9961                 get_stat64(&hw_stats->tx_errors) +
9962                 get_stat64(&hw_stats->tx_mac_errors) +
9963                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
9964                 get_stat64(&hw_stats->tx_discards);
9965
9966         stats->multicast = old_stats->multicast +
9967                 get_stat64(&hw_stats->rx_mcast_packets);
9968         stats->collisions = old_stats->collisions +
9969                 get_stat64(&hw_stats->tx_collisions);
9970
9971         stats->rx_length_errors = old_stats->rx_length_errors +
9972                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
9973                 get_stat64(&hw_stats->rx_undersize_packets);
9974
9975         stats->rx_over_errors = old_stats->rx_over_errors +
9976                 get_stat64(&hw_stats->rxbds_empty);
9977         stats->rx_frame_errors = old_stats->rx_frame_errors +
9978                 get_stat64(&hw_stats->rx_align_errors);
9979         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
9980                 get_stat64(&hw_stats->tx_discards);
9981         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
9982                 get_stat64(&hw_stats->tx_carrier_sense_errors);
9983
9984         stats->rx_crc_errors = old_stats->rx_crc_errors +
9985                 calc_crc_errors(tp);
9986
9987         stats->rx_missed_errors = old_stats->rx_missed_errors +
9988                 get_stat64(&hw_stats->rx_discards);
9989
9990         stats->rx_dropped = tp->rx_dropped;
9991         stats->tx_dropped = tp->tx_dropped;
9992
9993         return stats;
9994 }
9995
9996 static inline u32 calc_crc(unsigned char *buf, int len)
9997 {
9998         u32 reg;
9999         u32 tmp;
10000         int j, k;
10001
10002         reg = 0xffffffff;
10003
10004         for (j = 0; j < len; j++) {
10005                 reg ^= buf[j];
10006
10007                 for (k = 0; k < 8; k++) {
10008                         tmp = reg & 0x01;
10009
10010                         reg >>= 1;
10011
10012                         if (tmp)
10013                                 reg ^= 0xedb88320;
10014                 }
10015         }
10016
10017         return ~reg;
10018 }
10019
10020 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
10021 {
10022         /* accept or reject all multicast frames */
10023         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
10024         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
10025         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
10026         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
10027 }
10028
10029 static void __tg3_set_rx_mode(struct net_device *dev)
10030 {
10031         struct tg3 *tp = netdev_priv(dev);
10032         u32 rx_mode;
10033
10034         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
10035                                   RX_MODE_KEEP_VLAN_TAG);
10036
10037 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
10038         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
10039          * flag clear.
10040          */
10041         if (!tg3_flag(tp, ENABLE_ASF))
10042                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
10043 #endif
10044
10045         if (dev->flags & IFF_PROMISC) {
10046                 /* Promiscuous mode. */
10047                 rx_mode |= RX_MODE_PROMISC;
10048         } else if (dev->flags & IFF_ALLMULTI) {
10049                 /* Accept all multicast. */
10050                 tg3_set_multi(tp, 1);
10051         } else if (netdev_mc_empty(dev)) {
10052                 /* Reject all multicast. */
10053                 tg3_set_multi(tp, 0);
10054         } else {
10055                 /* Accept one or more multicast(s). */
10056                 struct netdev_hw_addr *ha;
10057                 u32 mc_filter[4] = { 0, };
10058                 u32 regidx;
10059                 u32 bit;
10060                 u32 crc;
10061
10062                 netdev_for_each_mc_addr(ha, dev) {
10063                         crc = calc_crc(ha->addr, ETH_ALEN);
10064                         bit = ~crc & 0x7f;
10065                         regidx = (bit & 0x60) >> 5;
10066                         bit &= 0x1f;
10067                         mc_filter[regidx] |= (1 << bit);
10068                 }
10069
10070                 tw32(MAC_HASH_REG_0, mc_filter[0]);
10071                 tw32(MAC_HASH_REG_1, mc_filter[1]);
10072                 tw32(MAC_HASH_REG_2, mc_filter[2]);
10073                 tw32(MAC_HASH_REG_3, mc_filter[3]);
10074         }
10075
10076         if (rx_mode != tp->rx_mode) {
10077                 tp->rx_mode = rx_mode;
10078                 tw32_f(MAC_RX_MODE, rx_mode);
10079                 udelay(10);
10080         }
10081 }
10082
/* ndo_set_rx_mode hook: apply rx filtering under the full lock; a no-op
 * while the device is down (tg3_open reprograms the filter anyway).
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);

        if (!netif_running(dev))
                return;

        tg3_full_lock(tp, 0);
        __tg3_set_rx_mode(dev);
        tg3_full_unlock(tp);
}
10094
/* ethtool get_regs_len hook: size of the register dump buffer. */
static int tg3_get_regs_len(struct net_device *dev)
{
        return TG3_REG_BLK_SIZE;
}
10099
/* ethtool get_regs hook: dump the legacy register block into @_p.
 *
 * The buffer is zeroed first; registers are not touched while the chip
 * is in a low-power state (the zeroed buffer is returned instead).
 */
static void tg3_get_regs(struct net_device *dev,
                struct ethtool_regs *regs, void *_p)
{
        struct tg3 *tp = netdev_priv(dev);

        regs->version = 0;

        memset(_p, 0, TG3_REG_BLK_SIZE);

        if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
                return;

        tg3_full_lock(tp, 0);

        tg3_dump_legacy_regs(tp, (u32 *)_p);

        tg3_full_unlock(tp);
}
10118
/* ethtool get_eeprom_len hook: size of the NVRAM device in bytes. */
static int tg3_get_eeprom_len(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);

        return tp->nvram_size;
}
10125
/* ethtool get_eeprom hook: copy @eeprom->len bytes of NVRAM starting
 * at @eeprom->offset into @data.
 *
 * NVRAM is read in big-endian 32-bit words, so an unaligned head and
 * tail are handled separately by reading the containing word and
 * copying out only the requested bytes.  eeprom->len is updated to the
 * number of bytes actually copied, even when a read fails mid-transfer.
 * Returns 0, -EINVAL if there is no NVRAM, -EAGAIN in low-power state,
 * or the tg3_nvram_read_be32() error.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
        struct tg3 *tp = netdev_priv(dev);
        int ret;
        u8  *pd;
        u32 i, offset, len, b_offset, b_count;
        __be32 val;

        if (tg3_flag(tp, NO_NVRAM))
                return -EINVAL;

        /* NVRAM cannot be accessed while the chip is powered down. */
        if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
                return -EAGAIN;

        offset = eeprom->offset;
        len = eeprom->len;
        eeprom->len = 0;

        eeprom->magic = TG3_EEPROM_MAGIC;

        if (offset & 3) {
                /* adjustments to start on required 4 byte boundary */
                b_offset = offset & 3;
                b_count = 4 - b_offset;
                if (b_count > len) {
                        /* i.e. offset=1 len=2 */
                        b_count = len;
                }
                ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
                if (ret)
                        return ret;
                memcpy(data, ((char *)&val) + b_offset, b_count);
                len -= b_count;
                offset += b_count;
                eeprom->len += b_count;
        }

        /* read bytes up to the last 4 byte boundary */
        pd = &data[eeprom->len];
        for (i = 0; i < (len - (len & 3)); i += 4) {
                ret = tg3_nvram_read_be32(tp, offset + i, &val);
                if (ret) {
                        /* Report the bytes copied before the failure. */
                        eeprom->len += i;
                        return ret;
                }
                memcpy(pd + i, &val, 4);
        }
        eeprom->len += i;

        if (len & 3) {
                /* read last bytes not ending on 4 byte boundary */
                pd = &data[eeprom->len];
                b_count = len & 3;
                b_offset = offset + len - b_count;
                ret = tg3_nvram_read_be32(tp, b_offset, &val);
                if (ret)
                        return ret;
                memcpy(pd, &val, b_count);
                eeprom->len += b_count;
        }
        return 0;
}
10188
10189 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
10190
10191 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10192 {
10193         struct tg3 *tp = netdev_priv(dev);
10194         int ret;
10195         u32 offset, len, b_offset, odd_len;
10196         u8 *buf;
10197         __be32 start, end;
10198
10199         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10200                 return -EAGAIN;
10201
10202         if (tg3_flag(tp, NO_NVRAM) ||
10203             eeprom->magic != TG3_EEPROM_MAGIC)
10204                 return -EINVAL;
10205
10206         offset = eeprom->offset;
10207         len = eeprom->len;
10208
10209         if ((b_offset = (offset & 3))) {
10210                 /* adjustments to start on required 4 byte boundary */
10211                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10212                 if (ret)
10213                         return ret;
10214                 len += b_offset;
10215                 offset &= ~3;
10216                 if (len < 4)
10217                         len = 4;
10218         }
10219
10220         odd_len = 0;
10221         if (len & 3) {
10222                 /* adjustments to end on required 4 byte boundary */
10223                 odd_len = 1;
10224                 len = (len + 3) & ~3;
10225                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10226                 if (ret)
10227                         return ret;
10228         }
10229
10230         buf = data;
10231         if (b_offset || odd_len) {
10232                 buf = kmalloc(len, GFP_KERNEL);
10233                 if (!buf)
10234                         return -ENOMEM;
10235                 if (b_offset)
10236                         memcpy(buf, &start, 4);
10237                 if (odd_len)
10238                         memcpy(buf+len-4, &end, 4);
10239                 memcpy(buf + b_offset, data, eeprom->len);
10240         }
10241
10242         ret = tg3_nvram_write_block(tp, offset, len, buf);
10243
10244         if (buf != data)
10245                 kfree(buf);
10246
10247         return ret;
10248 }
10249
10250 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10251 {
10252         struct tg3 *tp = netdev_priv(dev);
10253
10254         if (tg3_flag(tp, USE_PHYLIB)) {
10255                 struct phy_device *phydev;
10256                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10257                         return -EAGAIN;
10258                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10259                 return phy_ethtool_gset(phydev, cmd);
10260         }
10261
10262         cmd->supported = (SUPPORTED_Autoneg);
10263
10264         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10265                 cmd->supported |= (SUPPORTED_1000baseT_Half |
10266                                    SUPPORTED_1000baseT_Full);
10267
10268         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10269                 cmd->supported |= (SUPPORTED_100baseT_Half |
10270                                   SUPPORTED_100baseT_Full |
10271                                   SUPPORTED_10baseT_Half |
10272                                   SUPPORTED_10baseT_Full |
10273                                   SUPPORTED_TP);
10274                 cmd->port = PORT_TP;
10275         } else {
10276                 cmd->supported |= SUPPORTED_FIBRE;
10277                 cmd->port = PORT_FIBRE;
10278         }
10279
10280         cmd->advertising = tp->link_config.advertising;
10281         if (tg3_flag(tp, PAUSE_AUTONEG)) {
10282                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10283                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10284                                 cmd->advertising |= ADVERTISED_Pause;
10285                         } else {
10286                                 cmd->advertising |= ADVERTISED_Pause |
10287                                                     ADVERTISED_Asym_Pause;
10288                         }
10289                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10290                         cmd->advertising |= ADVERTISED_Asym_Pause;
10291                 }
10292         }
10293         if (netif_running(dev)) {
10294                 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10295                 cmd->duplex = tp->link_config.active_duplex;
10296         } else {
10297                 ethtool_cmd_speed_set(cmd, SPEED_INVALID);
10298                 cmd->duplex = DUPLEX_INVALID;
10299         }
10300         cmd->phy_address = tp->phy_addr;
10301         cmd->transceiver = XCVR_INTERNAL;
10302         cmd->autoneg = tp->link_config.autoneg;
10303         cmd->maxtxpkt = 0;
10304         cmd->maxrxpkt = 0;
10305         return 0;
10306 }
10307
/* ethtool set_settings handler: validate and apply new link parameters
 * (autoneg mode, advertised modes, or forced speed/duplex), then kick
 * off renegotiation if the interface is running.
 */
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 speed = ethtool_cmd_speed(cmd);

	/* When phylib manages the PHY, defer entirely to it. */
	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_sset(phydev, cmd);
	}

	if (cmd->autoneg != AUTONEG_ENABLE &&
	    cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	/* A forced link must specify a concrete duplex setting. */
	if (cmd->autoneg == AUTONEG_DISABLE &&
	    cmd->duplex != DUPLEX_FULL &&
	    cmd->duplex != DUPLEX_HALF)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		/* Build the set of modes this device may advertise,
		 * based on its PHY capabilities.
		 */
		u32 mask = ADVERTISED_Autoneg |
			   ADVERTISED_Pause |
			   ADVERTISED_Asym_Pause;

		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
			mask |= ADVERTISED_1000baseT_Half |
				ADVERTISED_1000baseT_Full;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			mask |= ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_TP;
		else
			mask |= ADVERTISED_FIBRE;

		/* Reject any requested mode outside the supported set. */
		if (cmd->advertising & ~mask)
			return -EINVAL;

		/* Keep only the speed/duplex advertisement bits of the
		 * request; pause and port-type bits are handled elsewhere.
		 */
		mask &= (ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_100baseT_Half |
			 ADVERTISED_100baseT_Full |
			 ADVERTISED_10baseT_Half |
			 ADVERTISED_10baseT_Full);

		cmd->advertising &= mask;
	} else {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
			/* SerDes links can only be forced to 1000/full. */
			if (speed != SPEED_1000)
				return -EINVAL;

			if (cmd->duplex != DUPLEX_FULL)
				return -EINVAL;
		} else {
			/* Copper can only be forced to 10 or 100 Mb/s. */
			if (speed != SPEED_100 &&
			    speed != SPEED_10)
				return -EINVAL;
		}
	}

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->autoneg;
	if (cmd->autoneg == AUTONEG_ENABLE) {
		tp->link_config.advertising = (cmd->advertising |
					      ADVERTISED_Autoneg);
		/* Forced speed/duplex are meaningless under autoneg. */
		tp->link_config.speed = SPEED_INVALID;
		tp->link_config.duplex = DUPLEX_INVALID;
	} else {
		tp->link_config.advertising = 0;
		tp->link_config.speed = speed;
		tp->link_config.duplex = cmd->duplex;
	}

	/* NOTE(review): orig_* appear to snapshot the requested settings
	 * for restoration elsewhere (e.g. after a power transition) --
	 * confirm against the rest of the driver.
	 */
	tp->link_config.orig_speed = tp->link_config.speed;
	tp->link_config.orig_duplex = tp->link_config.duplex;
	tp->link_config.orig_autoneg = tp->link_config.autoneg;

	if (netif_running(dev))
		tg3_setup_phy(tp, 1);

	tg3_full_unlock(tp);

	return 0;
}
10398
10399 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10400 {
10401         struct tg3 *tp = netdev_priv(dev);
10402
10403         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
10404         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
10405         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
10406         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
10407 }
10408
10409 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10410 {
10411         struct tg3 *tp = netdev_priv(dev);
10412
10413         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10414                 wol->supported = WAKE_MAGIC;
10415         else
10416                 wol->supported = 0;
10417         wol->wolopts = 0;
10418         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10419                 wol->wolopts = WAKE_MAGIC;
10420         memset(&wol->sopass, 0, sizeof(wol->sopass));
10421 }
10422
10423 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10424 {
10425         struct tg3 *tp = netdev_priv(dev);
10426         struct device *dp = &tp->pdev->dev;
10427
10428         if (wol->wolopts & ~WAKE_MAGIC)
10429                 return -EINVAL;
10430         if ((wol->wolopts & WAKE_MAGIC) &&
10431             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10432                 return -EINVAL;
10433
10434         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10435
10436         spin_lock_bh(&tp->lock);
10437         if (device_may_wakeup(dp))
10438                 tg3_flag_set(tp, WOL_ENABLE);
10439         else
10440                 tg3_flag_clear(tp, WOL_ENABLE);
10441         spin_unlock_bh(&tp->lock);
10442
10443         return 0;
10444 }
10445
10446 static u32 tg3_get_msglevel(struct net_device *dev)
10447 {
10448         struct tg3 *tp = netdev_priv(dev);
10449         return tp->msg_enable;
10450 }
10451
10452 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10453 {
10454         struct tg3 *tp = netdev_priv(dev);
10455         tp->msg_enable = value;
10456 }
10457
/* ethtool nway_reset handler: restart link autonegotiation. */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		return -EINVAL;

	if (tg3_flag(tp, USE_PHYLIB)) {
		/* phylib owns the PHY; let it restart autoneg. */
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		/* BMCR is read twice and the first result discarded.
		 * NOTE(review): presumably a deliberate dummy read to
		 * flush stale/latched state -- confirm before removing.
		 */
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
			/* Restart (and force-enable) autonegotiation. */
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}
10491
10492 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10493 {
10494         struct tg3 *tp = netdev_priv(dev);
10495
10496         ering->rx_max_pending = tp->rx_std_ring_mask;
10497         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10498                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10499         else
10500                 ering->rx_jumbo_max_pending = 0;
10501
10502         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10503
10504         ering->rx_pending = tp->rx_pending;
10505         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10506                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10507         else
10508                 ering->rx_jumbo_pending = 0;
10509
10510         ering->tx_pending = tp->napi[0].tx_pending;
10511 }
10512
10513 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10514 {
10515         struct tg3 *tp = netdev_priv(dev);
10516         int i, irq_sync = 0, err = 0;
10517
10518         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10519             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10520             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10521             (ering->tx_pending <= MAX_SKB_FRAGS) ||
10522             (tg3_flag(tp, TSO_BUG) &&
10523              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10524                 return -EINVAL;
10525
10526         if (netif_running(dev)) {
10527                 tg3_phy_stop(tp);
10528                 tg3_netif_stop(tp);
10529                 irq_sync = 1;
10530         }
10531
10532         tg3_full_lock(tp, irq_sync);
10533
10534         tp->rx_pending = ering->rx_pending;
10535
10536         if (tg3_flag(tp, MAX_RXPEND_64) &&
10537             tp->rx_pending > 63)
10538                 tp->rx_pending = 63;
10539         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10540
10541         for (i = 0; i < tp->irq_max; i++)
10542                 tp->napi[i].tx_pending = ering->tx_pending;
10543
10544         if (netif_running(dev)) {
10545                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10546                 err = tg3_restart_hw(tp, 1);
10547                 if (!err)
10548                         tg3_netif_start(tp);
10549         }
10550
10551         tg3_full_unlock(tp);
10552
10553         if (irq_sync && !err)
10554                 tg3_phy_start(tp);
10555
10556         return err;
10557 }
10558
10559 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10560 {
10561         struct tg3 *tp = netdev_priv(dev);
10562
10563         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10564
10565         if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10566                 epause->rx_pause = 1;
10567         else
10568                 epause->rx_pause = 0;
10569
10570         if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10571                 epause->tx_pause = 1;
10572         else
10573                 epause->tx_pause = 0;
10574 }
10575
/* ethtool set_pauseparam handler: apply new flow-control settings.
 * Takes the phylib path when USE_PHYLIB is set; otherwise updates the
 * driver's own link_config and restarts the hardware if running.
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	if (tg3_flag(tp, USE_PHYLIB)) {
		u32 newadv;
		struct phy_device *phydev;

		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

		/* Asymmetric rx/tx settings need Asym_Pause support. */
		if (!(phydev->supported & SUPPORTED_Pause) ||
		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
		     (epause->rx_pause != epause->tx_pause)))
			return -EINVAL;

		/* Translate the rx/tx enables into the 802.3 pause
		 * advertisement bits (Pause / Asym_Pause).
		 */
		tp->link_config.flowctrl = 0;
		if (epause->rx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_RX;

			if (epause->tx_pause) {
				tp->link_config.flowctrl |= FLOW_CTRL_TX;
				newadv = ADVERTISED_Pause;
			} else
				newadv = ADVERTISED_Pause |
					 ADVERTISED_Asym_Pause;
		} else if (epause->tx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
			newadv = ADVERTISED_Asym_Pause;
		} else
			newadv = 0;

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);

		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
			u32 oldadv = phydev->advertising &
				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
			/* Only touch the PHY if the pause bits changed. */
			if (oldadv != newadv) {
				phydev->advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
				phydev->advertising |= newadv;
				if (phydev->autoneg) {
					/*
					 * Always renegotiate the link to
					 * inform our link partner of our
					 * flow control settings, even if the
					 * flow control is forced.  Let
					 * tg3_adjust_link() do the final
					 * flow control setup.
					 */
					return phy_start_aneg(phydev);
				}
			}

			if (!epause->autoneg)
				tg3_setup_flow_control(tp, 0, 0);
		} else {
			/* NOTE(review): PHY not connected yet; the bits are
			 * stashed in orig_advertising, presumably applied
			 * when the PHY connects -- confirm.
			 */
			tp->link_config.orig_advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
			tp->link_config.orig_advertising |= newadv;
		}
	} else {
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);
		if (epause->rx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

		if (netif_running(dev)) {
			/* Full chip reset to apply the new settings. */
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, 1);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	return err;
}
10677
10678 static int tg3_get_sset_count(struct net_device *dev, int sset)
10679 {
10680         switch (sset) {
10681         case ETH_SS_TEST:
10682                 return TG3_NUM_TEST;
10683         case ETH_SS_STATS:
10684                 return TG3_NUM_STATS;
10685         default:
10686                 return -EOPNOTSUPP;
10687         }
10688 }
10689
10690 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10691 {
10692         switch (stringset) {
10693         case ETH_SS_STATS:
10694                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10695                 break;
10696         case ETH_SS_TEST:
10697                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10698                 break;
10699         default:
10700                 WARN_ON(1);     /* we need a WARN() */
10701                 break;
10702         }
10703 }
10704
10705 static int tg3_set_phys_id(struct net_device *dev,
10706                             enum ethtool_phys_id_state state)
10707 {
10708         struct tg3 *tp = netdev_priv(dev);
10709
10710         if (!netif_running(tp->dev))
10711                 return -EAGAIN;
10712
10713         switch (state) {
10714         case ETHTOOL_ID_ACTIVE:
10715                 return 1;       /* cycle on/off once per second */
10716
10717         case ETHTOOL_ID_ON:
10718                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10719                      LED_CTRL_1000MBPS_ON |
10720                      LED_CTRL_100MBPS_ON |
10721                      LED_CTRL_10MBPS_ON |
10722                      LED_CTRL_TRAFFIC_OVERRIDE |
10723                      LED_CTRL_TRAFFIC_BLINK |
10724                      LED_CTRL_TRAFFIC_LED);
10725                 break;
10726
10727         case ETHTOOL_ID_OFF:
10728                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10729                      LED_CTRL_TRAFFIC_OVERRIDE);
10730                 break;
10731
10732         case ETHTOOL_ID_INACTIVE:
10733                 tw32(MAC_LED_CTRL, tp->led_ctrl);
10734                 break;
10735         }
10736
10737         return 0;
10738 }
10739
10740 static void tg3_get_ethtool_stats(struct net_device *dev,
10741                                    struct ethtool_stats *estats, u64 *tmp_stats)
10742 {
10743         struct tg3 *tp = netdev_priv(dev);
10744         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
10745 }
10746
/* Read the adapter's VPD (Vital Product Data) into a freshly-allocated
 * buffer.  On success returns the buffer (caller must kfree()) with its
 * byte length stored in *vpdlen; returns NULL on any failure.
 */
static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
{
	int i;
	__be32 *buf;
	u32 offset = 0, len = 0;
	u32 magic, val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		/* Scan the NVRAM directory for an extended-VPD entry. */
		for (offset = TG3_NVM_DIR_START;
		     offset < TG3_NVM_DIR_END;
		     offset += TG3_NVM_DIRENT_SIZE) {
			if (tg3_nvram_read(tp, offset, &val))
				return NULL;

			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
			    TG3_NVM_DIRTYPE_EXTVPD)
				break;
		}

		if (offset != TG3_NVM_DIR_END) {
			/* Found one: length is in 32-bit words; the next
			 * directory word holds the data offset.
			 */
			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
			if (tg3_nvram_read(tp, offset + 4, &offset))
				return NULL;

			offset = tg3_nvram_logical_addr(tp, offset);
		}
	}

	if (!offset || !len) {
		/* No extended VPD: fall back to the fixed VPD region. */
		offset = TG3_NVM_VPD_OFF;
		len = TG3_NVM_VPD_LEN;
	}

	buf = kmalloc(len, GFP_KERNEL);
	if (buf == NULL)
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (i = 0; i < len; i += 4) {
			/* The data is in little-endian format in NVRAM.
			 * Use the big-endian read routines to preserve
			 * the byte order as it exists in NVRAM.
			 */
			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
				goto error;
		}
	} else {
		u8 *ptr;
		ssize_t cnt;
		unsigned int pos = 0;

		/* No usable NVRAM image: read the VPD through PCI config
		 * space instead, allowing up to 3 attempts; timeouts and
		 * interruptions are retried, other errors abort.
		 */
		ptr = (u8 *)&buf[0];
		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
			cnt = pci_read_vpd(tp->pdev, pos,
					   len - pos, ptr);
			if (cnt == -ETIMEDOUT || cnt == -EINTR)
				cnt = 0;
			else if (cnt < 0)
				goto error;
		}
		if (pos != len)
			goto error;
	}

	*vpdlen = len;

	return buf;

error:
	kfree(buf);
	return NULL;
}
10822
10823 #define NVRAM_TEST_SIZE 0x100
10824 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
10825 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
10826 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
10827 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
10828 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
10829 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
10830 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10831 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10832
/* NVRAM self-test: read the NVRAM image and verify its integrity.
 * Supports the legacy EEPROM layout (CRC-protected regions), selfboot
 * format-1 layouts (8-bit byte-sum), the hardware selfboot layout
 * (per-byte parity), and finally the VPD read-only section checksum.
 * Returns 0 on success, negative errno on failure.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic, len;
	__be32 *buf;
	int i, j, k, err = 0, size;

	if (tg3_flag(tp, NO_NVRAM))
		return 0;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return -EIO;

	/* The magic word at offset 0 identifies the image format and
	 * therefore how many bytes must be read and verified.
	 */
	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			/* Selfboot format 1: size varies by revision. */
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_4:
				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_5:
				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_6:
				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
				break;
			default:
				return -EIO;
			}
		} else
			/* Unrecognized selfboot format: nothing to verify. */
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	/* Pull the whole image into memory one 32-bit word at a time. */
	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		err = tg3_nvram_read_be32(tp, i, &buf[j]);
		if (err)
			break;
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	magic = be32_to_cpu(buf[0]);
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		/* Format-1 images: the 8-bit sum of all covered bytes
		 * must be zero.
		 */
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes.  Bytes 0,
		 * 8 and 16-17 of the image hold the parity bits for the
		 * data bytes; everything else is data.
		 */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				/* 7 parity bits in this byte. */
				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			} else if (i == 16) {
				int l;
				u8 msk;

				/* 6 parity bits here ... */
				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;

				/* ... plus 8 more in the following byte. */
				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			data[j++] = buf8[i];
		}

		/* Each data byte together with its parity bit must have
		 * odd parity.
		 */
		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	err = -EIO;

	/* Legacy EEPROM layout: two CRC-protected regions.
	 * NOTE(review): buf is __be32 but compared via le32_to_cpu;
	 * presumably intentional given NVRAM stores little-endian data
	 * read with the be32 routines -- confirm before changing.
	 */

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if (csum != le32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != le32_to_cpu(buf[0xfc/4]))
		goto out;

	kfree(buf);

	/* Finally validate the VPD read-only section checksum. */
	buf = tg3_vpd_readblock(tp, &len);
	if (!buf)
		return -ENOMEM;

	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
	if (i > 0) {
		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
		if (j < 0)
			goto out;

		/* Bounds-check the RO section against the VPD length. */
		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
			goto out;

		i += PCI_VPD_LRDT_TAG_SIZE;
		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
					      PCI_VPD_RO_KEYWORD_CHKSUM);
		if (j > 0) {
			u8 csum8 = 0;

			j += PCI_VPD_INFO_FLD_HDR_SIZE;

			/* The byte sum from the VPD start through the
			 * checksum field must be zero.
			 */
			for (i = 0; i <= j; i++)
				csum8 += ((u8 *)buf)[i];

			if (csum8)
				goto out;
		}
	}

	err = 0;

out:
	kfree(buf);
	return err;
}
11011
11012 #define TG3_SERDES_TIMEOUT_SEC  2
11013 #define TG3_COPPER_TIMEOUT_SEC  6
11014
11015 static int tg3_test_link(struct tg3 *tp)
11016 {
11017         int i, max;
11018
11019         if (!netif_running(tp->dev))
11020                 return -ENODEV;
11021
11022         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
11023                 max = TG3_SERDES_TIMEOUT_SEC;
11024         else
11025                 max = TG3_COPPER_TIMEOUT_SEC;
11026
11027         for (i = 0; i < max; i++) {
11028                 if (netif_carrier_ok(tp->dev))
11029                         return 0;
11030
11031                 if (msleep_interruptible(1000))
11032                         break;
11033         }
11034
11035         return -EIO;
11036 }
11037
11038 /* Only test the commonly used registers */
/* Walk a table of commonly used MAC, receive, host-coalescing,
 * buffer-manager and mailbox registers and verify their read-only
 * and read/write bits.  For each applicable entry the original value
 * is saved, zeros and then ones are written, and the read-back value
 * is checked against the entry's masks; the original value is
 * restored before moving on.  Returns 0 on success, -EIO on the
 * first mismatch (the failing offset is logged when hw messages are
 * enabled).  Must be called with the chip halted.
 */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;
		/* Applicability flags: which ASIC families the entry
		 * is valid for.
		 */
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
#define TG3_FL_NOT_5750	0x8
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		/* Sentinel: offset 0xffff terminates the table. */
		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	is_5705 = is_5750 = 0;
	if (tg3_flag(tp, 5705_PLUS)) {
		is_5705 = 1;
		if (tg3_flag(tp, 5750_PLUS))
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		/* Skip entries that do not apply to this ASIC family. */
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if (tg3_flag(tp, IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		/* Restore the register before moving to the next entry. */
		tw32(offset, save_val);
	}

	return 0;

out:
	if (netif_msg_hw(tp))
		netdev_err(tp->dev,
			   "Register test failed at offset %x\n", offset);
	/* Restore the register that failed before bailing out. */
	tw32(offset, save_val);
	return -EIO;
}
11258
11259 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11260 {
11261         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11262         int i;
11263         u32 j;
11264
11265         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11266                 for (j = 0; j < len; j += 4) {
11267                         u32 val;
11268
11269                         tg3_write_mem(tp, offset + j, test_pattern[i]);
11270                         tg3_read_mem(tp, offset + j, &val);
11271                         if (val != test_pattern[i])
11272                                 return -EIO;
11273                 }
11274         }
11275         return 0;
11276 }
11277
/* Exercise the chip's internal memory regions via tg3_do_mem_test().
 * The offset/length table is chosen by ASIC family; every table is
 * terminated by an entry with offset 0xffffffff.  Returns 0 on
 * success or the first tg3_do_mem_test() error.
 */
static int tg3_test_memory(struct tg3 *tp)
{
	static struct mem_entry {
		u32 offset;	/* start of region in chip address space */
		u32 len;	/* region length in bytes */
	} mem_tbl_570x[] = {
		{ 0x00000000, 0x00b50},
		{ 0x00002000, 0x1c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5705[] = {
		{ 0x00000100, 0x0000c},
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x01000},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0e000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5755[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x00800},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5906[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00400},
		{ 0x00006000, 0x00400},
		{ 0x00008000, 0x01000},
		{ 0x00010000, 0x01000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5717[] = {
		{ 0x00000200, 0x00008},
		{ 0x00010000, 0x0a000},
		{ 0x00020000, 0x13c00},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_57765[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x09800},
		{ 0x00010000, 0x0a000},
		{ 0xffffffff, 0x00000}
	};
	struct mem_entry *mem_tbl;
	int err = 0;
	int i;

	/* Check the most specific ASIC families first. */
	if (tg3_flag(tp, 5717_PLUS))
		mem_tbl = mem_tbl_5717;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		mem_tbl = mem_tbl_57765;
	else if (tg3_flag(tp, 5755_PLUS))
		mem_tbl = mem_tbl_5755;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mem_tbl = mem_tbl_5906;
	else if (tg3_flag(tp, 5705_PLUS))
		mem_tbl = mem_tbl_5705;
	else
		mem_tbl = mem_tbl_570x;

	/* Stop at the first failing region. */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
		if (err)
			break;
	}

	return err;
}
11346
/* Parameters for the TSO loopback test frame. */
#define TG3_TSO_MSS		500

/* Header sizes of the template below. */
#define TG3_TSO_IP_HDR_LEN	20
#define TG3_TSO_TCP_HDR_LEN	20
#define TG3_TSO_TCP_OPT_LEN	12

/* Frame template used by tg3_run_loopback() for the TSO test.  It is
 * copied into the test skb immediately after the two MAC addresses,
 * so it starts with the ethertype, followed by an IPv4 header (whose
 * tot_len field is patched at run time) and a TCP header with
 * options.  Field comments below are per standard IPv4/TCP layout;
 * the code only patches iph->tot_len and clears th->check.
 */
static const u8 tg3_tso_header[] = {
0x08, 0x00,				/* ethertype: IPv4 */
0x45, 0x00, 0x00, 0x00,			/* IP: ver/ihl, tos, tot_len (patched) */
0x00, 0x00, 0x40, 0x00,			/* IP: id, frag flags/offset */
0x40, 0x06, 0x00, 0x00,			/* IP: ttl, proto (TCP), checksum */
0x0a, 0x00, 0x00, 0x01,			/* IP: source 10.0.0.1 */
0x0a, 0x00, 0x00, 0x02,			/* IP: dest 10.0.0.2 */
0x0d, 0x00, 0xe0, 0x00,			/* TCP: source, dest ports */
0x00, 0x00, 0x01, 0x00,			/* TCP: sequence number */
0x00, 0x00, 0x02, 0x00,			/* TCP: ack number */
0x80, 0x10, 0x10, 0x00,			/* TCP: doff/flags, window */
0x14, 0x09, 0x00, 0x00,			/* TCP: checksum, urgent ptr */
0x01, 0x01, 0x08, 0x0a,			/* TCP options (12 bytes) */
0x11, 0x11, 0x11, 0x11,
0x11, 0x11, 0x11, 0x11,
};
11369
/* Send one test frame (a TSO burst when @tso_loopback) of @pktsz
 * bytes through the chip in its current loopback configuration and
 * verify that every packet comes back intact.  Returns 0 on success,
 * -ENOMEM if the test skb cannot be allocated, -EIO on mapping
 * failure, transmit/receive timeout, descriptor error or payload
 * mismatch.
 */
static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
{
	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
	u32 budget;
	struct sk_buff *skb;
	u8 *tx_data, *rx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;
	struct tg3_napi *tnapi, *rnapi;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	/* With RSS/TSS active on a multi-vector setup the test
	 * traffic flows through napi[1] rather than napi[0].
	 */
	tnapi = &tp->napi[0];
	rnapi = &tp->napi[0];
	if (tp->irq_cnt > 1) {
		if (tg3_flag(tp, ENABLE_RSS))
			rnapi = &tp->napi[1];
		if (tg3_flag(tp, ENABLE_TSS))
			tnapi = &tp->napi[1];
	}
	coal_now = tnapi->coal_now | rnapi->coal_now;

	err = -EIO;

	tx_len = pktsz;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	/* Destination MAC = our own address; source MAC zeroed. */
	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);

	if (tso_loopback) {
		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];

		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
			      TG3_TSO_TCP_OPT_LEN;

		/* Lay the canned IP/TCP template right after the MAC
		 * addresses.
		 */
		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
		       sizeof(tg3_tso_header));
		mss = TG3_TSO_MSS;

		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);

		/* Set the total length field in the IP header */
		iph->tot_len = htons((u16)(mss + hdr_len));

		base_flags = (TXD_FLAG_CPU_PRE_DMA |
			      TXD_FLAG_CPU_POST_DMA);

		/* HW TSO devices want the TCP checksum cleared in the
		 * template; others need the checksum-offload flag set.
		 */
		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			struct tcphdr *th;
			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
			th = (struct tcphdr *)&tx_data[val];
			th->check = 0;
		} else
			base_flags |= TXD_FLAG_TCPUDP_CSUM;

		/* Each HW TSO generation encodes the header length in
		 * a different set of mss/base_flags bits.
		 */
		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
		} else {
			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
		}

		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
	} else {
		num_pkts = 1;
		data_off = ETH_HLEN;
	}

	/* Fill the payload with a predictable pattern so the receive
	 * path can verify it byte by byte.
	 */
	for (i = data_off; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	val = tnapi->tx_prod;
	tnapi->tx_buffers[val].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       rnapi->coal_now);

	udelay(10);

	/* Snapshot the rx producer index before transmitting. */
	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;

	budget = tg3_tx_avail(tnapi);
	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
			    base_flags | TXD_FLAG_END, mss, 0)) {
		tnapi->tx_buffers[val].skb = NULL;
		dev_kfree_skb(skb);
		return -EIO;
	}

	tnapi->tx_prod++;

	/* Ring the doorbell and flush the mailbox write. */
	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
	tr32_mailbox(tnapi->prodmbox);

	udelay(10);

	/* 350 usec to allow enough time on some 10/100 Mbps devices.  */
	for (i = 0; i < 35; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       coal_now);

		udelay(10);

		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
		rx_idx = rnapi->hw_status->idx[0].rx_producer;
		if ((tx_idx == tnapi->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
	dev_kfree_skb(skb);

	/* Transmit never completed, or not all packets were received. */
	if (tx_idx != tnapi->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	/* Walk every descriptor the chip produced and verify error
	 * bits, length, checksum status, source ring and payload.
	 */
	val = data_off;
	while (rx_idx != rx_start_idx) {
		desc = &rnapi->rx_rcb[rx_start_idx++];
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
			goto out;

		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
			 - ETH_FCS_LEN;

		if (!tso_loopback) {
			if (rx_len != tx_len)
				goto out;

			/* Small frames must land on the standard ring,
			 * larger ones on the jumbo ring.
			 */
			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
				if (opaque_key != RXD_OPAQUE_RING_STD)
					goto out;
			} else {
				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
					goto out;
			}
		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
			goto out;
		}

		if (opaque_key == RXD_OPAQUE_RING_STD) {
			rx_data = tpr->rx_std_buffers[desc_idx].data;
			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
					     mapping);
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
					     mapping);
		} else
			goto out;

		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
					    PCI_DMA_FROMDEVICE);

		/* Compare the received payload against the pattern
		 * written on the transmit side.
		 */
		rx_data += TG3_RX_OFFSET(tp);
		for (i = data_off; i < rx_len; i++, val++) {
			if (*(rx_data + i) != (u8) (val & 0xff))
				goto out;
		}
	}

	err = 0;

	/* tg3_free_rings will unmap and free the rx_data */
out:
	return err;
}
11570
11571 #define TG3_STD_LOOPBACK_FAILED         1
11572 #define TG3_JMB_LOOPBACK_FAILED         2
11573 #define TG3_TSO_LOOPBACK_FAILED         4
11574 #define TG3_LOOPBACK_FAILED \
11575         (TG3_STD_LOOPBACK_FAILED | \
11576          TG3_JMB_LOOPBACK_FAILED | \
11577          TG3_TSO_LOOPBACK_FAILED)
11578
/* Run the MAC, internal-PHY and (when @do_extlpbk) external-PHY
 * loopback tests.  data[0], data[1] and data[2] accumulate the
 * TG3_*_LOOPBACK_FAILED bits for the MAC, internal and external
 * variants respectively.  Returns 0 if all requested tests passed,
 * -EIO otherwise.  Requires the chip to be reset for the test and
 * expects the caller to restore normal operation afterwards.
 */
static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
{
	int err = -EIO;
	u32 eee_cap;

	/* Mask the EEE capability for the duration of the test;
	 * restored at 'done'.
	 */
	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;

	if (!netif_running(tp->dev)) {
		data[0] = TG3_LOOPBACK_FAILED;
		data[1] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[2] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	err = tg3_reset_hw(tp, 1);
	if (err) {
		data[0] = TG3_LOOPBACK_FAILED;
		data[1] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[2] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	if (tg3_flag(tp, ENABLE_RSS)) {
		int i;

		/* Reroute all rx packets to the 1st queue */
		for (i = MAC_RSS_INDIR_TBL_0;
		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
			tw32(i, 0x0);
	}

	/* HW errata - mac loopback fails in some cases on 5780.
	 * Normal traffic and PHY loopback are not affected by
	 * errata.  Also, the MAC loopback test is deprecated for
	 * all newer ASIC revisions.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT)) {
		tg3_mac_loopback(tp, true);

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[0] |= TG3_STD_LOOPBACK_FAILED;

		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
			data[0] |= TG3_JMB_LOOPBACK_FAILED;

		tg3_mac_loopback(tp, false);
	}

	/* PHY loopback: skipped for serdes PHYs and phylib-managed
	 * devices.
	 */
	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    !tg3_flag(tp, USE_PHYLIB)) {
		int i;

		tg3_phy_lpbk_set(tp, 0, false);

		/* Wait for link */
		for (i = 0; i < 100; i++) {
			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
				break;
			mdelay(1);
		}

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[1] |= TG3_STD_LOOPBACK_FAILED;
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
			data[1] |= TG3_TSO_LOOPBACK_FAILED;
		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
			data[1] |= TG3_JMB_LOOPBACK_FAILED;

		if (do_extlpbk) {
			tg3_phy_lpbk_set(tp, 0, true);

			/* All link indications report up, but the hardware
			 * isn't really ready for about 20 msec.  Double it
			 * to be sure.
			 */
			mdelay(40);

			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
				data[2] |= TG3_STD_LOOPBACK_FAILED;
			if (tg3_flag(tp, TSO_CAPABLE) &&
			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
				data[2] |= TG3_TSO_LOOPBACK_FAILED;
			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
			    tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
				data[2] |= TG3_JMB_LOOPBACK_FAILED;
		}

		/* Re-enable gphy autopowerdown. */
		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
			tg3_phy_toggle_apd(tp, true);
	}

	err = (data[0] | data[1] | data[2]) ? -EIO : 0;

done:
	tp->phy_flags |= eee_cap;

	return err;
}
11685
/* ethtool self-test entry point.  Always runs the nvram test and
 * (unless external loopback was requested) the link test; with
 * ETH_TEST_FL_OFFLINE it additionally halts the chip to run the
 * register, memory, loopback and interrupt tests, then restarts it.
 * Per-test results land in data[] (non-zero = failed) and
 * etest->flags gets ETH_TEST_FL_FAILED on any failure.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;

	/* A device in low-power state must be powered up first; if
	 * that fails, mark every test as failed.
	 */
	if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
	    tg3_power_up(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
		return;
	}

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	if (!doextlpbk && tg3_test_link(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, err2 = 0, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		/* Quiesce the chip: halt it and stop the on-chip CPUs
		 * before poking at registers and internal memory.
		 */
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!tg3_flag(tp, 5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}

		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}

		if (doextlpbk)
			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;

		/* Loopback results fill data[4], data[5] and data[6]. */
		if (tg3_test_loopback(tp, &data[4], doextlpbk))
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[7] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Bring the chip back to its normal operating state. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tg3_flag_set(tp, INIT_COMPLETE);
			err2 = tg3_restart_hw(tp, 1);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_down(tp);

}
11773
11774 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11775 {
11776         struct mii_ioctl_data *data = if_mii(ifr);
11777         struct tg3 *tp = netdev_priv(dev);
11778         int err;
11779
11780         if (tg3_flag(tp, USE_PHYLIB)) {
11781                 struct phy_device *phydev;
11782                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11783                         return -EAGAIN;
11784                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11785                 return phy_mii_ioctl(phydev, ifr, cmd);
11786         }
11787
11788         switch (cmd) {
11789         case SIOCGMIIPHY:
11790                 data->phy_id = tp->phy_addr;
11791
11792                 /* fallthru */
11793         case SIOCGMIIREG: {
11794                 u32 mii_regval;
11795
11796                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11797                         break;                  /* We have no PHY */
11798
11799                 if (!netif_running(dev))
11800                         return -EAGAIN;
11801
11802                 spin_lock_bh(&tp->lock);
11803                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11804                 spin_unlock_bh(&tp->lock);
11805
11806                 data->val_out = mii_regval;
11807
11808                 return err;
11809         }
11810
11811         case SIOCSMIIREG:
11812                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11813                         break;                  /* We have no PHY */
11814
11815                 if (!netif_running(dev))
11816                         return -EAGAIN;
11817
11818                 spin_lock_bh(&tp->lock);
11819                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11820                 spin_unlock_bh(&tp->lock);
11821
11822                 return err;
11823
11824         default:
11825                 /* do nothing */
11826                 break;
11827         }
11828         return -EOPNOTSUPP;
11829 }
11830
11831 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11832 {
11833         struct tg3 *tp = netdev_priv(dev);
11834
11835         memcpy(ec, &tp->coal, sizeof(*ec));
11836         return 0;
11837 }
11838
11839 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11840 {
11841         struct tg3 *tp = netdev_priv(dev);
11842         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11843         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11844
11845         if (!tg3_flag(tp, 5705_PLUS)) {
11846                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11847                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11848                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11849                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11850         }
11851
11852         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11853             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11854             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11855             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11856             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11857             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11858             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11859             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11860             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11861             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11862                 return -EINVAL;
11863
11864         /* No rx interrupts will be generated if both are zero */
11865         if ((ec->rx_coalesce_usecs == 0) &&
11866             (ec->rx_max_coalesced_frames == 0))
11867                 return -EINVAL;
11868
11869         /* No tx interrupts will be generated if both are zero */
11870         if ((ec->tx_coalesce_usecs == 0) &&
11871             (ec->tx_max_coalesced_frames == 0))
11872                 return -EINVAL;
11873
11874         /* Only copy relevant parameters, ignore all others. */
11875         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11876         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11877         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11878         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11879         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11880         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11881         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11882         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11883         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11884
11885         if (netif_running(dev)) {
11886                 tg3_full_lock(tp, 0);
11887                 __tg3_set_coalesce(tp, &tp->coal);
11888                 tg3_full_unlock(tp);
11889         }
11890         return 0;
11891 }
11892
/* ethtool operations table; wires the ethtool core to the tg3_*
 * handlers defined in this file. */
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.set_phys_id		= tg3_set_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
};
11920
11921 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11922 {
11923         u32 cursize, val, magic;
11924
11925         tp->nvram_size = EEPROM_CHIP_SIZE;
11926
11927         if (tg3_nvram_read(tp, 0, &magic) != 0)
11928                 return;
11929
11930         if ((magic != TG3_EEPROM_MAGIC) &&
11931             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11932             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11933                 return;
11934
11935         /*
11936          * Size the chip by reading offsets at increasing powers of two.
11937          * When we encounter our validation signature, we know the addressing
11938          * has wrapped around, and thus have our chip size.
11939          */
11940         cursize = 0x10;
11941
11942         while (cursize < tp->nvram_size) {
11943                 if (tg3_nvram_read(tp, cursize, &val) != 0)
11944                         return;
11945
11946                 if (val == magic)
11947                         break;
11948
11949                 cursize <<= 1;
11950         }
11951
11952         tp->nvram_size = cursize;
11953 }
11954
11955 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11956 {
11957         u32 val;
11958
11959         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
11960                 return;
11961
11962         /* Selfboot format */
11963         if (val != TG3_EEPROM_MAGIC) {
11964                 tg3_get_eeprom_size(tp);
11965                 return;
11966         }
11967
11968         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
11969                 if (val != 0) {
11970                         /* This is confusing.  We want to operate on the
11971                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
11972                          * call will read from NVRAM and byteswap the data
11973                          * according to the byteswapping settings for all
11974                          * other register accesses.  This ensures the data we
11975                          * want will always reside in the lower 16-bits.
11976                          * However, the data in NVRAM is in LE format, which
11977                          * means the data from the NVRAM read will always be
11978                          * opposite the endianness of the CPU.  The 16-bit
11979                          * byteswap then brings the data to CPU endianness.
11980                          */
11981                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
11982                         return;
11983                 }
11984         }
11985         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11986 }
11987
/* Decode NVRAM_CFG1 for the original chip families: record whether a
 * flash interface is present, and for 5750/5780-class parts decode the
 * vendor strap bits into a JEDEC id, page size and buffered/unbuffered
 * mode.  Other chips default to a buffered Atmel AT45DB0X1B layout.
 */
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tg3_flag_set(tp, FLASH);
	} else {
		/* No flash interface: clear the compatibility-bypass bit. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    tg3_flag(tp, 5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	} else {
		/* Default layout for other chip revisions. */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tg3_flag_set(tp, NVRAM_BUFFERED);
	}
}
12038
12039 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12040 {
12041         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12042         case FLASH_5752PAGE_SIZE_256:
12043                 tp->nvram_pagesize = 256;
12044                 break;
12045         case FLASH_5752PAGE_SIZE_512:
12046                 tp->nvram_pagesize = 512;
12047                 break;
12048         case FLASH_5752PAGE_SIZE_1K:
12049                 tp->nvram_pagesize = 1024;
12050                 break;
12051         case FLASH_5752PAGE_SIZE_2K:
12052                 tp->nvram_pagesize = 2048;
12053                 break;
12054         case FLASH_5752PAGE_SIZE_4K:
12055                 tp->nvram_pagesize = 4096;
12056                 break;
12057         case FLASH_5752PAGE_SIZE_264:
12058                 tp->nvram_pagesize = 264;
12059                 break;
12060         case FLASH_5752PAGE_SIZE_528:
12061                 tp->nvram_pagesize = 528;
12062                 break;
12063         }
12064 }
12065
/* Decode NVRAM_CFG1 for 5752 devices: record TPM protection, the JEDEC
 * vendor, buffered mode, flash vs. EEPROM, and the page size.
 */
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tg3_flag_set(tp, PROTECTED_NVRAM);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	}

	if (tg3_flag(tp, FLASH)) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		/* Clear the compatibility-bypass bit for EEPROM access. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
12106
/* Decode NVRAM_CFG1 for 5755 devices: record TPM protection and the
 * per-vendor JEDEC id, page size and total size.  With TPM protection
 * active the reported size shrinks to the unprotected region.
 */
static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}
12162
/* Decode NVRAM_CFG1 for 5787 devices: set the JEDEC vendor, buffered
 * mode, flash presence and page size for each supported part.
 */
static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		/* EEPROM parts: use the maximum EEPROM size as page size
		 * and clear the compatibility-bypass bit. */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}
}
12200
/* Decode NVRAM_CFG1 for 5761 devices.  When TPM protection is active,
 * the usable size is read from the NVRAM_ADDR_LOCKOUT register;
 * otherwise it is inferred from the vendor/device strap.
 */
static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		/* Protected: the hardware lockout register gives the
		 * accessible size directly. */
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}
12275
12276 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12277 {
12278         tp->nvram_jedecnum = JEDEC_ATMEL;
12279         tg3_flag_set(tp, NVRAM_BUFFERED);
12280         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12281 }
12282
/* Decode NVRAM_CFG1 for 57780 devices.  Unrecognized straps mark the
 * device as having no NVRAM.  For flash parts, the page size is decoded
 * afterwards and non-264/528 page sizes disable address translation.
 */
static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		/* EEPROM parts: clear the compatibility-bypass bit and
		 * skip the flash page-size decoding below. */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Per-device total size. */
		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Per-device total size. */
		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
12354
12355
/* Decode NVRAM_CFG1 for 5717 devices.  Unrecognized straps mark the
 * device as having no NVRAM.  Some straps leave tp->nvram_size unset so
 * the size is probed later (see the "Detect size" comments).  For flash
 * parts, non-264/528 page sizes disable address translation.
 */
static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		/* EEPROM parts: clear the compatibility-bypass bit and
		 * skip the flash page-size decoding below. */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
12433
/* Decode NVRAM_CFG1 strapping for 5720-class devices: identify the
 * attached EEPROM/flash part, then fill in tp->nvram_jedecnum,
 * tp->nvram_pagesize / tp->nvram_size and the NVRAM-related flags.
 * Unrecognized strappings result in the NO_NVRAM flag being set.
 */
static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, nvmpinstrp;

	nvcfg1 = tr32(NVRAM_CFG1);
	/* Vendor/part selection shares the 5752 vendor strap mask. */
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		/* Plain Atmel EEPROM: no flash-style paging; page size is
		 * the whole chip, and address translation does not apply.
		 */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		return;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		/* Atmel DataFlash parts: size is derived from the part
		 * number encoded in the strap.
		 */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		/* ST M25PExx / M45PExx serial flash parts. */
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* 264/528-byte pages use the DataFlash native addressing scheme;
	 * everything else needs no flash address translation.
	 */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
12545
/* Chips other than 5700/5701 use the NVRAM for fetching info.
 *
 * Reset the SEEPROM state machine, enable NVRAM access, and dispatch to
 * the chip-specific probe routine to fill in tp->nvram_size and friends.
 * 5700/5701 have no NVRAM block and fall back to direct EEPROM sizing.
 */
static void __devinit tg3_nvram_init(struct tg3 *tp)
{
	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	/* Let the state machine reset settle. */
	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		/* Serialize against firmware/other agents before probing. */
		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		/* Probe routines set this; zero means "size unknown". */
		tp->nvram_size = 0;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
			tg3_get_57780_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		/* Fall back to an empirical size probe if still unknown. */
		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}
12611
12612 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
12613                                     u32 offset, u32 len, u8 *buf)
12614 {
12615         int i, j, rc = 0;
12616         u32 val;
12617
12618         for (i = 0; i < len; i += 4) {
12619                 u32 addr;
12620                 __be32 data;
12621
12622                 addr = offset + i;
12623
12624                 memcpy(&data, buf + i, 4);
12625
12626                 /*
12627                  * The SEEPROM interface expects the data to always be opposite
12628                  * the native endian format.  We accomplish this by reversing
12629                  * all the operations that would have been performed on the
12630                  * data from a call to tg3_nvram_read_be32().
12631                  */
12632                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
12633
12634                 val = tr32(GRC_EEPROM_ADDR);
12635                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
12636
12637                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
12638                         EEPROM_ADDR_READ);
12639                 tw32(GRC_EEPROM_ADDR, val |
12640                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
12641                         (addr & EEPROM_ADDR_ADDR_MASK) |
12642                         EEPROM_ADDR_START |
12643                         EEPROM_ADDR_WRITE);
12644
12645                 for (j = 0; j < 1000; j++) {
12646                         val = tr32(GRC_EEPROM_ADDR);
12647
12648                         if (val & EEPROM_ADDR_COMPLETE)
12649                                 break;
12650                         msleep(1);
12651                 }
12652                 if (!(val & EEPROM_ADDR_COMPLETE)) {
12653                         rc = -EBUSY;
12654                         break;
12655                 }
12656         }
12657
12658         return rc;
12659 }
12660
12661 /* offset and length are dword aligned */
12662 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
12663                 u8 *buf)
12664 {
12665         int ret = 0;
12666         u32 pagesize = tp->nvram_pagesize;
12667         u32 pagemask = pagesize - 1;
12668         u32 nvram_cmd;
12669         u8 *tmp;
12670
12671         tmp = kmalloc(pagesize, GFP_KERNEL);
12672         if (tmp == NULL)
12673                 return -ENOMEM;
12674
12675         while (len) {
12676                 int j;
12677                 u32 phy_addr, page_off, size;
12678
12679                 phy_addr = offset & ~pagemask;
12680
12681                 for (j = 0; j < pagesize; j += 4) {
12682                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
12683                                                   (__be32 *) (tmp + j));
12684                         if (ret)
12685                                 break;
12686                 }
12687                 if (ret)
12688                         break;
12689
12690                 page_off = offset & pagemask;
12691                 size = pagesize;
12692                 if (len < size)
12693                         size = len;
12694
12695                 len -= size;
12696
12697                 memcpy(tmp + page_off, buf, size);
12698
12699                 offset = offset + (pagesize - page_off);
12700
12701                 tg3_enable_nvram_access(tp);
12702
12703                 /*
12704                  * Before we can erase the flash page, we need
12705                  * to issue a special "write enable" command.
12706                  */
12707                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12708
12709                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12710                         break;
12711
12712                 /* Erase the target page */
12713                 tw32(NVRAM_ADDR, phy_addr);
12714
12715                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
12716                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
12717
12718                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12719                         break;
12720
12721                 /* Issue another write enable to start the write. */
12722                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12723
12724                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12725                         break;
12726
12727                 for (j = 0; j < pagesize; j += 4) {
12728                         __be32 data;
12729
12730                         data = *((__be32 *) (tmp + j));
12731
12732                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
12733
12734                         tw32(NVRAM_ADDR, phy_addr + j);
12735
12736                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
12737                                 NVRAM_CMD_WR;
12738
12739                         if (j == 0)
12740                                 nvram_cmd |= NVRAM_CMD_FIRST;
12741                         else if (j == (pagesize - 4))
12742                                 nvram_cmd |= NVRAM_CMD_LAST;
12743
12744                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12745                                 break;
12746                 }
12747                 if (ret)
12748                         break;
12749         }
12750
12751         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12752         tg3_nvram_exec_cmd(tp, nvram_cmd);
12753
12754         kfree(tmp);
12755
12756         return ret;
12757 }
12758
/* offset and length are dword aligned.
 *
 * Write path for buffered-flash and EEPROM parts: each dword is written
 * with its own command, with NVRAM_CMD_FIRST/LAST marking page-burst
 * boundaries.  Returns 0 on success or the first tg3_nvram_exec_cmd()
 * error.
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		/* FIRST on a page boundary or at the start of the transfer;
		 * LAST on the final dword of a page or of the transfer.
		 */
		if (page_off == 0 || i == 0)
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* Older ST parts need an explicit write-enable command
		 * before each page burst.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
		    !tg3_flag(tp, 5755_PLUS) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {

			if ((ret = tg3_nvram_exec_cmd(tp,
				NVRAM_CMD_WREN | NVRAM_CMD_GO |
				NVRAM_CMD_DONE)))

				break;
		}
		if (!tg3_flag(tp, FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
			break;
	}
	return ret;
}
12809
12810 /* offset and length are dword aligned */
12811 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12812 {
12813         int ret;
12814
12815         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12816                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12817                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
12818                 udelay(40);
12819         }
12820
12821         if (!tg3_flag(tp, NVRAM)) {
12822                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12823         } else {
12824                 u32 grc_mode;
12825
12826                 ret = tg3_nvram_lock(tp);
12827                 if (ret)
12828                         return ret;
12829
12830                 tg3_enable_nvram_access(tp);
12831                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
12832                         tw32(NVRAM_WRITE1, 0x406);
12833
12834                 grc_mode = tr32(GRC_MODE);
12835                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12836
12837                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
12838                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
12839                                 buf);
12840                 } else {
12841                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12842                                 buf);
12843                 }
12844
12845                 grc_mode = tr32(GRC_MODE);
12846                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12847
12848                 tg3_disable_nvram_access(tp);
12849                 tg3_nvram_unlock(tp);
12850         }
12851
12852         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12853                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
12854                 udelay(40);
12855         }
12856
12857         return ret;
12858 }
12859
/* One entry of the board table below: maps a PCI subsystem
 * (vendor, device) ID pair to the PHY ID used on that board.
 * Consulted via tg3_lookup_by_subsys().
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};
12864
/* Known boards, keyed by PCI subsystem vendor/device ID.  A phy_id of 0
 * means no specific PHY ID is recorded for that board — presumably
 * resolved elsewhere; confirm against the callers of
 * tg3_lookup_by_subsys().
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};
12928
12929 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
12930 {
12931         int i;
12932
12933         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
12934                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
12935                      tp->pdev->subsystem_vendor) &&
12936                     (subsys_id_to_phy_id[i].subsys_devid ==
12937                      tp->pdev->subsystem_device))
12938                         return &subsys_id_to_phy_id[i];
12939         }
12940         return NULL;
12941 }
12942
/* Parse the board configuration that bootcode left in NIC SRAM (or, on
 * 5906, in the VCPU shadow register) and derive PHY ID, LED mode, WOL
 * capability, ASF/APE enables and assorted workaround flags.  Falls back
 * to conservative defaults when the SRAM signature is absent.
 */
static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;

	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default.  */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);
	tg3_flag_set(tp, WOL_CAP);

	/* 5906 exposes its config through the VCPU shadow register
	 * instead of NIC SRAM.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tg3_flag_set(tp, ASPM_WORKAROUND);
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}
		goto done;
	}

	/* Only trust the SRAM config blocks if the magic signature is
	 * present; otherwise keep the defaults set above.
	 */
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		/* CFG_2 only exists for newer chips with a sane bootcode
		 * version; cfg2 stays 0 otherwise.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		/* Reassemble the PHY ID from the two SRAM halves into the
		 * driver's TG3_PHY_ID bit layout; 0 means "not recorded".
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		}

		/* Board-specific LED overrides take precedence over the
		 * SRAM-configured mode.
		 */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			/* These Arima boards set the WP bit but are
			 * treated as writable anyway.
			 */
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, 5750_PLUS))
			tg3_flag_set(tp, ENABLE_APE);

		/* Serdes/fiber boards lose WOL capability unless the
		 * fiber-WOL config bit is set.
		 */
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tg3_flag_clear(tp, WOL_CAP);

		if (tg3_flag(tp, WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
				tg3_flag_set(tp, ASPM_WORKAROUND);
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
	}
done:
	/* Publish the final WOL state to the PM core. */
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}
13142
13143 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13144 {
13145         int i;
13146         u32 val;
13147
13148         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13149         tw32(OTP_CTRL, cmd);
13150
13151         /* Wait for up to 1 ms for command to execute. */
13152         for (i = 0; i < 100; i++) {
13153                 val = tr32(OTP_STATUS);
13154                 if (val & OTP_STATUS_CMD_DONE)
13155                         break;
13156                 udelay(10);
13157         }
13158
13159         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13160 }
13161
/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 * Returns 0 if any step of the OTP command sequence fails.
 */
static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	/* Route OTP accesses through the GRC register window. */
	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	/* First read: word holding the top half of the config. */
	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	/* Second read: word holding the bottom half of the config. */
	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	/* Merge: low 16 bits of the first word become the high half,
	 * high 16 bits of the second word become the low half.
	 */
	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
13191
13192 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13193 {
13194         u32 adv = ADVERTISED_Autoneg;
13195
13196         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13197                 adv |= ADVERTISED_1000baseT_Half |
13198                        ADVERTISED_1000baseT_Full;
13199
13200         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13201                 adv |= ADVERTISED_100baseT_Half |
13202                        ADVERTISED_100baseT_Full |
13203                        ADVERTISED_10baseT_Half |
13204                        ADVERTISED_10baseT_Full |
13205                        ADVERTISED_TP;
13206         else
13207                 adv |= ADVERTISED_FIBRE;
13208
13209         tp->link_config.advertising = adv;
13210         tp->link_config.speed = SPEED_INVALID;
13211         tp->link_config.duplex = DUPLEX_INVALID;
13212         tp->link_config.autoneg = AUTONEG_ENABLE;
13213         tp->link_config.active_speed = SPEED_INVALID;
13214         tp->link_config.active_duplex = DUPLEX_INVALID;
13215         tp->link_config.orig_speed = SPEED_INVALID;
13216         tp->link_config.orig_duplex = DUPLEX_INVALID;
13217         tp->link_config.orig_autoneg = AUTONEG_INVALID;
13218 }
13219
/* Probe the attached PHY: establish tp->phy_id and tp->phy_flags,
 * set the default link configuration, and (when safe) reset the PHY
 * and restart autonegotiation.  Falls back to the hard-coded
 * subsystem-ID table when the PHY ID registers cannot be trusted.
 * Returns 0 on success or a negative errno.
 */
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	/* When phylib manages the PHY, defer everything to it. */
	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		/* Fold the two MII ID registers into the driver's
		 * internal TG3_PHY_ID_* layout.
		 */
		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	/* Mark the chip revisions that support Energy Efficient
	 * Ethernet (copper only; early A0 steppings excluded).
	 */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
	     (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

	tg3_phy_init_link_config(tp);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, mask;

		/* BMSR is read twice — presumably because link status is
		 * latched-low, so the first read clears a stale latch;
		 * confirm against the MII spec before consolidating.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		/* If the PHY is not already advertising every copper
		 * mode, reprogram it and restart autonegotiation.
		 */
		mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
		if (!tg3_copper_is_advertising_all(tp, mask)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		/* NOTE(review): the DSP init is deliberately issued a
		 * second time here — looks like a 5401 PHY workaround;
		 * confirm against errata before removing.
		 */
		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}
13333
/* Read the PCI Vital Product Data block: populate tp->fw_ver (on Dell
 * boards that store it in VENDOR0) and tp->board_part_number, falling
 * back to device-ID-derived part numbers when no VPD is available.
 */
static void __devinit tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int block_end, rosize, len;
	u32 vpdlen;
	int j, i = 0;

	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
	if (!vpd_data)
		goto out_no_vpd;

	/* Locate the read-only VPD resource. */
	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
	i += PCI_VPD_LRDT_TAG_SIZE;

	/* Bail out on a truncated or corrupt resource header. */
	if (block_end > vpdlen)
		goto out_not_found;

	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j > 0) {
		len = pci_vpd_info_field_size(&vpd_data[j]);

		/* "1028" is Dell's PCI vendor ID in ASCII; only such
		 * boards carry a firmware version in VENDOR0.
		 */
		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end || len != 4 ||
		    memcmp(&vpd_data[j], "1028", 4))
			goto partno;

		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
					      PCI_VPD_RO_KEYWORD_VENDOR0);
		if (j < 0)
			goto partno;

		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end)
			goto partno;

		memcpy(tp->fw_ver, &vpd_data[j], len);
		/* NOTE(review): the strncat bound is computed from vpdlen
		 * rather than sizeof(tp->fw_ver), and the memcpy above
		 * leaves fw_ver unterminated unless it was pre-zeroed —
		 * verify len < TG3_VER_SIZE holds before changing this.
		 */
		strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
	}

partno:
	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_PARTNO);
	if (i < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[i]);

	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len > TG3_BPN_SIZE ||
	    (len + i) > vpdlen)
		goto out_not_found;

	memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
	kfree(vpd_data);
	/* If VPD yielded a part number, we are done. */
	if (tp->board_part_number[0])
		return;

out_no_vpd:
	/* Fall back to a canned part number keyed on the PCI device ID. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
			strcpy(tp->board_part_number, "BCM5717");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
			strcpy(tp->board_part_number, "BCM5718");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
			strcpy(tp->board_part_number, "BCM57780");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
			strcpy(tp->board_part_number, "BCM57760");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
			strcpy(tp->board_part_number, "BCM57790");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
			strcpy(tp->board_part_number, "BCM57788");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
			strcpy(tp->board_part_number, "BCM57761");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
			strcpy(tp->board_part_number, "BCM57765");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
			strcpy(tp->board_part_number, "BCM57781");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
			strcpy(tp->board_part_number, "BCM57785");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
			strcpy(tp->board_part_number, "BCM57791");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			strcpy(tp->board_part_number, "BCM57795");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		strcpy(tp->board_part_number, "BCM95906");
	} else {
nomatch:
		strcpy(tp->board_part_number, "none");
	}
}
13442
13443 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13444 {
13445         u32 val;
13446
13447         if (tg3_nvram_read(tp, offset, &val) ||
13448             (val & 0xfc000000) != 0x0c000000 ||
13449             tg3_nvram_read(tp, offset + 4, &val) ||
13450             val != 0)
13451                 return 0;
13452
13453         return 1;
13454 }
13455
/* Append the NVRAM bootcode version to tp->fw_ver.  Newer images
 * carry a 16-byte embedded version string; older images encode a
 * major/minor pair in a fixed directory word.
 */
static void __devinit tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	/* Word 0xc: bootcode image offset; word 0x4: image start
	 * address (presumably — layout defined by the NVRAM map).
	 */
	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	/* A 0x0c000000-tagged header followed by a zero word marks the
	 * newer image format with an embedded version string.
	 */
	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	/* Append after whatever version text (e.g. from VPD) is
	 * already in fw_ver.
	 */
	dst_off = strlen(tp->fw_ver);

	if (newver) {
		/* Need room for the full 16-byte string. */
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		offset = offset + ver_offset - start;
		/* Copy the version string one big-endian word at a time. */
		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}
13507
13508 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13509 {
13510         u32 val, major, minor;
13511
13512         /* Use native endian representation */
13513         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13514                 return;
13515
13516         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13517                 TG3_NVM_HWSB_CFG1_MAJSFT;
13518         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13519                 TG3_NVM_HWSB_CFG1_MINSFT;
13520
13521         snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
13522 }
13523
/* Append the self-boot firmware version ("sb vM.mm" plus an optional
 * build letter) to tp->fw_ver.  @val is the NVRAM magic word already
 * read by the caller; it encodes the image format and revision.
 */
static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	/* Only format-1 images carry a version field we can parse. */
	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	/* The edition/build word lives at a revision-specific offset. */
	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		/* Unknown revision: leave just the "sb" tag. */
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;

	/* Sanity-limit: minor is printed %02d, build maps to 'a'-'z'. */
	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	/* Builds are suffixed as a letter: build 1 -> 'a', 2 -> 'b', ... */
	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}
13578
/* Locate the ASF management firmware image via the NVRAM directory
 * and append its 16-byte version string to tp->fw_ver as ", <ver>".
 */
static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	/* Scan the NVRAM directory for the ASF-init entry. */
	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	/* Pre-5705 parts use a fixed image base; later parts store it
	 * in the word preceding the directory entry.
	 */
	if (!tg3_flag(tp, 5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	offset += val - start;

	vlen = strlen(tp->fw_ver);

	/* NOTE(review): these two writes are not bounds-checked against
	 * TG3_VER_SIZE; safe only while the preceding version text is
	 * known to be short — confirm before reusing this pattern.
	 */
	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	/* Copy up to 16 version bytes, clamping the final word so the
	 * copy never runs past the end of fw_ver.
	 */
	for (i = 0; i < 4; i++) {
		__be32 v;
		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}
13630
/* Append the APE (DASH or NCSI) firmware version to tp->fw_ver.
 * Does nothing unless both APE and ASF are enabled and the APE
 * firmware segment reports itself ready.
 */
static void __devinit tg3_read_dash_ver(struct tg3 *tp)
{
	int vlen;
	u32 apedata;
	char *fwtype;

	if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
		return;

	/* Validate the APE shared-memory segment signature first. */
	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);

	/* The feature word tells us which firmware flavor is running;
	 * remember NCSI in the flags for later use.
	 */
	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
		tg3_flag_set(tp, APE_HAS_NCSI);
		fwtype = "NCSI";
	} else {
		fwtype = "DASH";
	}

	vlen = strlen(tp->fw_ver);

	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
		 fwtype,
		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
		 (apedata & APE_FW_VERSION_BLDMSK));
}
13666
13667 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13668 {
13669         u32 val;
13670         bool vpd_vers = false;
13671
13672         if (tp->fw_ver[0] != 0)
13673                 vpd_vers = true;
13674
13675         if (tg3_flag(tp, NO_NVRAM)) {
13676                 strcat(tp->fw_ver, "sb");
13677                 return;
13678         }
13679
13680         if (tg3_nvram_read(tp, 0, &val))
13681                 return;
13682
13683         if (val == TG3_EEPROM_MAGIC)
13684                 tg3_read_bc_ver(tp);
13685         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13686                 tg3_read_sb_ver(tp, val);
13687         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13688                 tg3_read_hwsb_ver(tp);
13689         else
13690                 return;
13691
13692         if (vpd_vers)
13693                 goto done;
13694
13695         if (tg3_flag(tp, ENABLE_APE)) {
13696                 if (tg3_flag(tp, ENABLE_ASF))
13697                         tg3_read_dash_ver(tp);
13698         } else if (tg3_flag(tp, ENABLE_ASF)) {
13699                 tg3_read_mgmtfw_ver(tp);
13700         }
13701
13702 done:
13703         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13704 }
13705
13706 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13707
13708 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13709 {
13710         if (tg3_flag(tp, LRG_PROD_RING_CAP))
13711                 return TG3_RX_RET_MAX_SIZE_5717;
13712         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13713                 return TG3_RX_RET_MAX_SIZE_5700;
13714         else
13715                 return TG3_RX_RET_MAX_SIZE_5705;
13716 }
13717
/* Host bridges that are known to reorder posted PCI writes.
 * NOTE(review): the consequence (presumably forcing explicit register
 * write flushes) is applied where this table is consumed — confirm at
 * the lookup site before editing the list.
 */
static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};
13724
13725 static int __devinit tg3_get_invariants(struct tg3 *tp)
13726 {
13727         u32 misc_ctrl_reg;
13728         u32 pci_state_reg, grc_misc_cfg;
13729         u32 val;
13730         u16 pci_cmd;
13731         int err;
13732
13733         /* Force memory write invalidate off.  If we leave it on,
13734          * then on 5700_BX chips we have to enable a workaround.
13735          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13736          * to match the cacheline size.  The Broadcom driver have this
13737          * workaround but turns MWI off all the times so never uses
13738          * it.  This seems to suggest that the workaround is insufficient.
13739          */
13740         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13741         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13742         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13743
13744         /* Important! -- Make sure register accesses are byteswapped
13745          * correctly.  Also, for those chips that require it, make
13746          * sure that indirect register accesses are enabled before
13747          * the first operation.
13748          */
13749         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13750                               &misc_ctrl_reg);
13751         tp->misc_host_ctrl |= (misc_ctrl_reg &
13752                                MISC_HOST_CTRL_CHIPREV);
13753         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13754                                tp->misc_host_ctrl);
13755
13756         tp->pci_chip_rev_id = (misc_ctrl_reg >>
13757                                MISC_HOST_CTRL_CHIPREV_SHIFT);
13758         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13759                 u32 prod_id_asic_rev;
13760
13761                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13762                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13763                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13764                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13765                         pci_read_config_dword(tp->pdev,
13766                                               TG3PCI_GEN2_PRODID_ASICREV,
13767                                               &prod_id_asic_rev);
13768                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13769                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13770                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13771                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13772                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13773                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13774                         pci_read_config_dword(tp->pdev,
13775                                               TG3PCI_GEN15_PRODID_ASICREV,
13776                                               &prod_id_asic_rev);
13777                 else
13778                         pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13779                                               &prod_id_asic_rev);
13780
13781                 tp->pci_chip_rev_id = prod_id_asic_rev;
13782         }
13783
13784         /* Wrong chip ID in 5752 A0. This code can be removed later
13785          * as A0 is not in production.
13786          */
13787         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13788                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13789
13790         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13791          * we need to disable memory and use config. cycles
13792          * only to access all registers. The 5702/03 chips
13793          * can mistakenly decode the special cycles from the
13794          * ICH chipsets as memory write cycles, causing corruption
13795          * of register and memory space. Only certain ICH bridges
13796          * will drive special cycles with non-zero data during the
13797          * address phase which can fall within the 5703's address
13798          * range. This is not an ICH bug as the PCI spec allows
13799          * non-zero address during special cycles. However, only
13800          * these ICH bridges are known to drive non-zero addresses
13801          * during special cycles.
13802          *
13803          * Since special cycles do not cross PCI bridges, we only
13804          * enable this workaround if the 5703 is on the secondary
13805          * bus of these ICH bridges.
13806          */
13807         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13808             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13809                 static struct tg3_dev_id {
13810                         u32     vendor;
13811                         u32     device;
13812                         u32     rev;
13813                 } ich_chipsets[] = {
13814                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13815                           PCI_ANY_ID },
13816                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13817                           PCI_ANY_ID },
13818                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13819                           0xa },
13820                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13821                           PCI_ANY_ID },
13822                         { },
13823                 };
13824                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13825                 struct pci_dev *bridge = NULL;
13826
13827                 while (pci_id->vendor != 0) {
13828                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
13829                                                 bridge);
13830                         if (!bridge) {
13831                                 pci_id++;
13832                                 continue;
13833                         }
13834                         if (pci_id->rev != PCI_ANY_ID) {
13835                                 if (bridge->revision > pci_id->rev)
13836                                         continue;
13837                         }
13838                         if (bridge->subordinate &&
13839                             (bridge->subordinate->number ==
13840                              tp->pdev->bus->number)) {
13841                                 tg3_flag_set(tp, ICH_WORKAROUND);
13842                                 pci_dev_put(bridge);
13843                                 break;
13844                         }
13845                 }
13846         }
13847
13848         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13849                 static struct tg3_dev_id {
13850                         u32     vendor;
13851                         u32     device;
13852                 } bridge_chipsets[] = {
13853                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13854                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13855                         { },
13856                 };
13857                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13858                 struct pci_dev *bridge = NULL;
13859
13860                 while (pci_id->vendor != 0) {
13861                         bridge = pci_get_device(pci_id->vendor,
13862                                                 pci_id->device,
13863                                                 bridge);
13864                         if (!bridge) {
13865                                 pci_id++;
13866                                 continue;
13867                         }
13868                         if (bridge->subordinate &&
13869                             (bridge->subordinate->number <=
13870                              tp->pdev->bus->number) &&
13871                             (bridge->subordinate->subordinate >=
13872                              tp->pdev->bus->number)) {
13873                                 tg3_flag_set(tp, 5701_DMA_BUG);
13874                                 pci_dev_put(bridge);
13875                                 break;
13876                         }
13877                 }
13878         }
13879
13880         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
13881          * DMA addresses > 40-bit. This bridge may have other additional
13882          * 57xx devices behind it in some 4-port NIC designs for example.
13883          * Any tg3 device found behind the bridge will also need the 40-bit
13884          * DMA workaround.
13885          */
13886         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13887             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13888                 tg3_flag_set(tp, 5780_CLASS);
13889                 tg3_flag_set(tp, 40BIT_DMA_BUG);
13890                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13891         } else {
13892                 struct pci_dev *bridge = NULL;
13893
13894                 do {
13895                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13896                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
13897                                                 bridge);
13898                         if (bridge && bridge->subordinate &&
13899                             (bridge->subordinate->number <=
13900                              tp->pdev->bus->number) &&
13901                             (bridge->subordinate->subordinate >=
13902                              tp->pdev->bus->number)) {
13903                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
13904                                 pci_dev_put(bridge);
13905                                 break;
13906                         }
13907                 } while (bridge);
13908         }
13909
13910         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13911             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
13912                 tp->pdev_peer = tg3_find_peer(tp);
13913
13914         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13915             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13916             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13917                 tg3_flag_set(tp, 5717_PLUS);
13918
13919         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13920             tg3_flag(tp, 5717_PLUS))
13921                 tg3_flag_set(tp, 57765_PLUS);
13922
13923         /* Intentionally exclude ASIC_REV_5906 */
13924         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13925             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13926             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13927             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13928             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13929             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13930             tg3_flag(tp, 57765_PLUS))
13931                 tg3_flag_set(tp, 5755_PLUS);
13932
13933         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13934             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13935             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13936             tg3_flag(tp, 5755_PLUS) ||
13937             tg3_flag(tp, 5780_CLASS))
13938                 tg3_flag_set(tp, 5750_PLUS);
13939
13940         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13941             tg3_flag(tp, 5750_PLUS))
13942                 tg3_flag_set(tp, 5705_PLUS);
13943
13944         /* Determine TSO capabilities */
13945         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
13946                 ; /* Do nothing. HW bug. */
13947         else if (tg3_flag(tp, 57765_PLUS))
13948                 tg3_flag_set(tp, HW_TSO_3);
13949         else if (tg3_flag(tp, 5755_PLUS) ||
13950                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13951                 tg3_flag_set(tp, HW_TSO_2);
13952         else if (tg3_flag(tp, 5750_PLUS)) {
13953                 tg3_flag_set(tp, HW_TSO_1);
13954                 tg3_flag_set(tp, TSO_BUG);
13955                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13956                     tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13957                         tg3_flag_clear(tp, TSO_BUG);
13958         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13959                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13960                    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13961                         tg3_flag_set(tp, TSO_BUG);
13962                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13963                         tp->fw_needed = FIRMWARE_TG3TSO5;
13964                 else
13965                         tp->fw_needed = FIRMWARE_TG3TSO;
13966         }
13967
13968         /* Selectively allow TSO based on operating conditions */
13969         if (tg3_flag(tp, HW_TSO_1) ||
13970             tg3_flag(tp, HW_TSO_2) ||
13971             tg3_flag(tp, HW_TSO_3) ||
13972             (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
13973                 tg3_flag_set(tp, TSO_CAPABLE);
13974         else {
13975                 tg3_flag_clear(tp, TSO_CAPABLE);
13976                 tg3_flag_clear(tp, TSO_BUG);
13977                 tp->fw_needed = NULL;
13978         }
13979
13980         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
13981                 tp->fw_needed = FIRMWARE_TG3;
13982
13983         tp->irq_max = 1;
13984
13985         if (tg3_flag(tp, 5750_PLUS)) {
13986                 tg3_flag_set(tp, SUPPORT_MSI);
13987                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
13988                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
13989                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
13990                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
13991                      tp->pdev_peer == tp->pdev))
13992                         tg3_flag_clear(tp, SUPPORT_MSI);
13993
13994                 if (tg3_flag(tp, 5755_PLUS) ||
13995                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13996                         tg3_flag_set(tp, 1SHOT_MSI);
13997                 }
13998
13999                 if (tg3_flag(tp, 57765_PLUS)) {
14000                         tg3_flag_set(tp, SUPPORT_MSIX);
14001                         tp->irq_max = TG3_IRQ_MAX_VECS;
14002                 }
14003         }
14004
14005         if (tg3_flag(tp, 5755_PLUS))
14006                 tg3_flag_set(tp, SHORT_DMA_BUG);
14007
14008         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
14009                 tg3_flag_set(tp, 4K_FIFO_LIMIT);
14010
14011         if (tg3_flag(tp, 5717_PLUS))
14012                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
14013
14014         if (tg3_flag(tp, 57765_PLUS) &&
14015             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
14016                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
14017
14018         if (!tg3_flag(tp, 5705_PLUS) ||
14019             tg3_flag(tp, 5780_CLASS) ||
14020             tg3_flag(tp, USE_JUMBO_BDFLAG))
14021                 tg3_flag_set(tp, JUMBO_CAPABLE);
14022
14023         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14024                               &pci_state_reg);
14025
14026         if (pci_is_pcie(tp->pdev)) {
14027                 u16 lnkctl;
14028
14029                 tg3_flag_set(tp, PCI_EXPRESS);
14030
14031                 tp->pcie_readrq = 4096;
14032                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14033                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14034                         tp->pcie_readrq = 2048;
14035
14036                 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
14037
14038                 pci_read_config_word(tp->pdev,
14039                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
14040                                      &lnkctl);
14041                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
14042                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14043                             ASIC_REV_5906) {
14044                                 tg3_flag_clear(tp, HW_TSO_2);
14045                                 tg3_flag_clear(tp, TSO_CAPABLE);
14046                         }
14047                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14048                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14049                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14050                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
14051                                 tg3_flag_set(tp, CLKREQ_BUG);
14052                 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
14053                         tg3_flag_set(tp, L1PLLPD_EN);
14054                 }
14055         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
14056                 /* BCM5785 devices are effectively PCIe devices, and should
14057                  * follow PCIe codepaths, but do not have a PCIe capabilities
14058                  * section.
14059                  */
14060                 tg3_flag_set(tp, PCI_EXPRESS);
14061         } else if (!tg3_flag(tp, 5705_PLUS) ||
14062                    tg3_flag(tp, 5780_CLASS)) {
14063                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14064                 if (!tp->pcix_cap) {
14065                         dev_err(&tp->pdev->dev,
14066                                 "Cannot find PCI-X capability, aborting\n");
14067                         return -EIO;
14068                 }
14069
14070                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14071                         tg3_flag_set(tp, PCIX_MODE);
14072         }
14073
14074         /* If we have an AMD 762 or VIA K8T800 chipset, write
14075          * reordering to the mailbox registers done by the host
14076          * controller can cause major troubles.  We read back from
14077          * every mailbox register write to force the writes to be
14078          * posted to the chip in order.
14079          */
14080         if (pci_dev_present(tg3_write_reorder_chipsets) &&
14081             !tg3_flag(tp, PCI_EXPRESS))
14082                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
14083
14084         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14085                              &tp->pci_cacheline_sz);
14086         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14087                              &tp->pci_lat_timer);
14088         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14089             tp->pci_lat_timer < 64) {
14090                 tp->pci_lat_timer = 64;
14091                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14092                                       tp->pci_lat_timer);
14093         }
14094
14095         /* Important! -- It is critical that the PCI-X hw workaround
14096          * situation is decided before the first MMIO register access.
14097          */
14098         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14099                 /* 5700 BX chips need to have their TX producer index
14100                  * mailboxes written twice to workaround a bug.
14101                  */
14102                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
14103
14104                 /* If we are in PCI-X mode, enable register write workaround.
14105                  *
14106                  * The workaround is to use indirect register accesses
14107                  * for all chip writes not to mailbox registers.
14108                  */
14109                 if (tg3_flag(tp, PCIX_MODE)) {
14110                         u32 pm_reg;
14111
14112                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14113
14114                         /* The chip can have it's power management PCI config
14115                          * space registers clobbered due to this bug.
14116                          * So explicitly force the chip into D0 here.
14117                          */
14118                         pci_read_config_dword(tp->pdev,
14119                                               tp->pm_cap + PCI_PM_CTRL,
14120                                               &pm_reg);
14121                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14122                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14123                         pci_write_config_dword(tp->pdev,
14124                                                tp->pm_cap + PCI_PM_CTRL,
14125                                                pm_reg);
14126
14127                         /* Also, force SERR#/PERR# in PCI command. */
14128                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14129                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14130                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14131                 }
14132         }
14133
14134         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14135                 tg3_flag_set(tp, PCI_HIGH_SPEED);
14136         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14137                 tg3_flag_set(tp, PCI_32BIT);
14138
14139         /* Chip-specific fixup from Broadcom driver */
14140         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14141             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14142                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14143                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14144         }
14145
14146         /* Default fast path register access methods */
14147         tp->read32 = tg3_read32;
14148         tp->write32 = tg3_write32;
14149         tp->read32_mbox = tg3_read32;
14150         tp->write32_mbox = tg3_write32;
14151         tp->write32_tx_mbox = tg3_write32;
14152         tp->write32_rx_mbox = tg3_write32;
14153
14154         /* Various workaround register access methods */
14155         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14156                 tp->write32 = tg3_write_indirect_reg32;
14157         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14158                  (tg3_flag(tp, PCI_EXPRESS) &&
14159                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14160                 /*
14161                  * Back to back register writes can cause problems on these
14162                  * chips, the workaround is to read back all reg writes
14163                  * except those to mailbox regs.
14164                  *
14165                  * See tg3_write_indirect_reg32().
14166                  */
14167                 tp->write32 = tg3_write_flush_reg32;
14168         }
14169
14170         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14171                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14172                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14173                         tp->write32_rx_mbox = tg3_write_flush_reg32;
14174         }
14175
14176         if (tg3_flag(tp, ICH_WORKAROUND)) {
14177                 tp->read32 = tg3_read_indirect_reg32;
14178                 tp->write32 = tg3_write_indirect_reg32;
14179                 tp->read32_mbox = tg3_read_indirect_mbox;
14180                 tp->write32_mbox = tg3_write_indirect_mbox;
14181                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14182                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14183
14184                 iounmap(tp->regs);
14185                 tp->regs = NULL;
14186
14187                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14188                 pci_cmd &= ~PCI_COMMAND_MEMORY;
14189                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14190         }
14191         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14192                 tp->read32_mbox = tg3_read32_mbox_5906;
14193                 tp->write32_mbox = tg3_write32_mbox_5906;
14194                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14195                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14196         }
14197
14198         if (tp->write32 == tg3_write_indirect_reg32 ||
14199             (tg3_flag(tp, PCIX_MODE) &&
14200              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14201               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14202                 tg3_flag_set(tp, SRAM_USE_CONFIG);
14203
14204         /* The memory arbiter has to be enabled in order for SRAM accesses
14205          * to succeed.  Normally on powerup the tg3 chip firmware will make
14206          * sure it is enabled, but other entities such as system netboot
14207          * code might disable it.
14208          */
14209         val = tr32(MEMARB_MODE);
14210         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14211
14212         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14213         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14214             tg3_flag(tp, 5780_CLASS)) {
14215                 if (tg3_flag(tp, PCIX_MODE)) {
14216                         pci_read_config_dword(tp->pdev,
14217                                               tp->pcix_cap + PCI_X_STATUS,
14218                                               &val);
14219                         tp->pci_fn = val & 0x7;
14220                 }
14221         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14222                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14223                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14224                     NIC_SRAM_CPMUSTAT_SIG) {
14225                         tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
14226                         tp->pci_fn = tp->pci_fn ? 1 : 0;
14227                 }
14228         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14229                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
14230                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14231                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14232                     NIC_SRAM_CPMUSTAT_SIG) {
14233                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
14234                                      TG3_CPMU_STATUS_FSHFT_5719;
14235                 }
14236         }
14237
14238         /* Get eeprom hw config before calling tg3_set_power_state().
14239          * In particular, the TG3_FLAG_IS_NIC flag must be
14240          * determined before calling tg3_set_power_state() so that
14241          * we know whether or not to switch out of Vaux power.
14242          * When the flag is set, it means that GPIO1 is used for eeprom
14243          * write protect and also implies that it is a LOM where GPIOs
14244          * are not used to switch power.
14245          */
14246         tg3_get_eeprom_hw_cfg(tp);
14247
14248         if (tg3_flag(tp, ENABLE_APE)) {
14249                 /* Allow reads and writes to the
14250                  * APE register and memory space.
14251                  */
14252                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14253                                  PCISTATE_ALLOW_APE_SHMEM_WR |
14254                                  PCISTATE_ALLOW_APE_PSPACE_WR;
14255                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14256                                        pci_state_reg);
14257
14258                 tg3_ape_lock_init(tp);
14259         }
14260
14261         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14262             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14263             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14264             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14265             tg3_flag(tp, 57765_PLUS))
14266                 tg3_flag_set(tp, CPMU_PRESENT);
14267
14268         /* Set up tp->grc_local_ctrl before calling
14269          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
14270          * will bring 5700's external PHY out of reset.
14271          * It is also used as eeprom write protect on LOMs.
14272          */
14273         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14274         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14275             tg3_flag(tp, EEPROM_WRITE_PROT))
14276                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14277                                        GRC_LCLCTRL_GPIO_OUTPUT1);
14278         /* Unused GPIO3 must be driven as output on 5752 because there
14279          * are no pull-up resistors on unused GPIO pins.
14280          */
14281         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14282                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14283
14284         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14285             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14286             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
14287                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14288
14289         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14290             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14291                 /* Turn off the debug UART. */
14292                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14293                 if (tg3_flag(tp, IS_NIC))
14294                         /* Keep VMain power. */
14295                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14296                                               GRC_LCLCTRL_GPIO_OUTPUT0;
14297         }
14298
14299         /* Switch out of Vaux if it is a NIC */
14300         tg3_pwrsrc_switch_to_vmain(tp);
14301
14302         /* Derive initial jumbo mode from MTU assigned in
14303          * ether_setup() via the alloc_etherdev() call
14304          */
14305         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14306                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14307
14308         /* Determine WakeOnLan speed to use. */
14309         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14310             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14311             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14312             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14313                 tg3_flag_clear(tp, WOL_SPEED_100MB);
14314         } else {
14315                 tg3_flag_set(tp, WOL_SPEED_100MB);
14316         }
14317
14318         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14319                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14320
14321         /* A few boards don't want Ethernet@WireSpeed phy feature */
14322         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14323             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14324              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14325              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14326             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14327             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14328                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14329
14330         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14331             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14332                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14333         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14334                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14335
14336         if (tg3_flag(tp, 5705_PLUS) &&
14337             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14338             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14339             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14340             !tg3_flag(tp, 57765_PLUS)) {
14341                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14342                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14343                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14344                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14345                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14346                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14347                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14348                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14349                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14350                 } else
14351                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14352         }
14353
14354         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14355             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14356                 tp->phy_otp = tg3_read_otp_phycfg(tp);
14357                 if (tp->phy_otp == 0)
14358                         tp->phy_otp = TG3_OTP_DEFAULT;
14359         }
14360
14361         if (tg3_flag(tp, CPMU_PRESENT))
14362                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14363         else
14364                 tp->mi_mode = MAC_MI_MODE_BASE;
14365
14366         tp->coalesce_mode = 0;
14367         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14368             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14369                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14370
14371         /* Set these bits to enable statistics workaround. */
14372         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14373             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14374             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14375                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14376                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14377         }
14378
14379         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14380             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14381                 tg3_flag_set(tp, USE_PHYLIB);
14382
14383         err = tg3_mdio_init(tp);
14384         if (err)
14385                 return err;
14386
14387         /* Initialize data/descriptor byte/word swapping. */
14388         val = tr32(GRC_MODE);
14389         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14390                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14391                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
14392                         GRC_MODE_B2HRX_ENABLE |
14393                         GRC_MODE_HTX2B_ENABLE |
14394                         GRC_MODE_HOST_STACKUP);
14395         else
14396                 val &= GRC_MODE_HOST_STACKUP;
14397
14398         tw32(GRC_MODE, val | tp->grc_mode);
14399
14400         tg3_switch_clocks(tp);
14401
14402         /* Clear this out for sanity. */
14403         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14404
14405         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14406                               &pci_state_reg);
14407         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14408             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14409                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14410
14411                 if (chiprevid == CHIPREV_ID_5701_A0 ||
14412                     chiprevid == CHIPREV_ID_5701_B0 ||
14413                     chiprevid == CHIPREV_ID_5701_B2 ||
14414                     chiprevid == CHIPREV_ID_5701_B5) {
14415                         void __iomem *sram_base;
14416
14417                         /* Write some dummy words into the SRAM status block
14418                          * area, see if it reads back correctly.  If the return
14419                          * value is bad, force enable the PCIX workaround.
14420                          */
14421                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14422
14423                         writel(0x00000000, sram_base);
14424                         writel(0x00000000, sram_base + 4);
14425                         writel(0xffffffff, sram_base + 4);
14426                         if (readl(sram_base) != 0x00000000)
14427                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14428                 }
14429         }
14430
14431         udelay(50);
14432         tg3_nvram_init(tp);
14433
14434         grc_misc_cfg = tr32(GRC_MISC_CFG);
14435         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14436
14437         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14438             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14439              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14440                 tg3_flag_set(tp, IS_5788);
14441
14442         if (!tg3_flag(tp, IS_5788) &&
14443             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14444                 tg3_flag_set(tp, TAGGED_STATUS);
14445         if (tg3_flag(tp, TAGGED_STATUS)) {
14446                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14447                                       HOSTCC_MODE_CLRTICK_TXBD);
14448
14449                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14450                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14451                                        tp->misc_host_ctrl);
14452         }
14453
14454         /* Preserve the APE MAC_MODE bits */
14455         if (tg3_flag(tp, ENABLE_APE))
14456                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14457         else
14458                 tp->mac_mode = 0;
14459
14460         /* these are limited to 10/100 only */
14461         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14462              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14463             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14464              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14465              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14466               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14467               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14468             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14469              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14470               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14471               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14472             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14473             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14474             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14475             (tp->phy_flags & TG3_PHYFLG_IS_FET))
14476                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14477
14478         err = tg3_phy_probe(tp);
14479         if (err) {
14480                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14481                 /* ... but do not return immediately ... */
14482                 tg3_mdio_fini(tp);
14483         }
14484
14485         tg3_read_vpd(tp);
14486         tg3_read_fw_ver(tp);
14487
14488         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14489                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14490         } else {
14491                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14492                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14493                 else
14494                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14495         }
14496
14497         /* 5700 {AX,BX} chips have a broken status block link
14498          * change bit implementation, so we must use the
14499          * status register in those cases.
14500          */
14501         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14502                 tg3_flag_set(tp, USE_LINKCHG_REG);
14503         else
14504                 tg3_flag_clear(tp, USE_LINKCHG_REG);
14505
14506         /* The led_ctrl is set during tg3_phy_probe, here we might
14507          * have to force the link status polling mechanism based
14508          * upon subsystem IDs.
14509          */
14510         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14511             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14512             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14513                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14514                 tg3_flag_set(tp, USE_LINKCHG_REG);
14515         }
14516
14517         /* For all SERDES we poll the MAC status register. */
14518         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14519                 tg3_flag_set(tp, POLL_SERDES);
14520         else
14521                 tg3_flag_clear(tp, POLL_SERDES);
14522
14523         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
14524         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14525         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14526             tg3_flag(tp, PCIX_MODE)) {
14527                 tp->rx_offset = NET_SKB_PAD;
14528 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14529                 tp->rx_copy_thresh = ~(u16)0;
14530 #endif
14531         }
14532
14533         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14534         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14535         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14536
14537         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14538
14539         /* Increment the rx prod index on the rx std ring by at most
14540          * 8 for these chips to workaround hw errata.
14541          */
14542         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14543             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14544             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14545                 tp->rx_std_max_post = 8;
14546
14547         if (tg3_flag(tp, ASPM_WORKAROUND))
14548                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14549                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
14550
14551         return err;
14552 }
14553
14554 #ifdef CONFIG_SPARC
14555 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14556 {
14557         struct net_device *dev = tp->dev;
14558         struct pci_dev *pdev = tp->pdev;
14559         struct device_node *dp = pci_device_to_OF_node(pdev);
14560         const unsigned char *addr;
14561         int len;
14562
14563         addr = of_get_property(dp, "local-mac-address", &len);
14564         if (addr && len == 6) {
14565                 memcpy(dev->dev_addr, addr, 6);
14566                 memcpy(dev->perm_addr, dev->dev_addr, 6);
14567                 return 0;
14568         }
14569         return -ENODEV;
14570 }
14571
14572 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14573 {
14574         struct net_device *dev = tp->dev;
14575
14576         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14577         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14578         return 0;
14579 }
14580 #endif
14581
/* Determine the device MAC address, trying sources in priority order:
 * the OF device tree (SPARC only), the bootcode MAC address mailbox in
 * NIC SRAM, NVRAM at a chip/function-specific offset, and finally the
 * MAC_ADDR_0 hardware registers.  On success dev->dev_addr and
 * dev->perm_addr are filled in and 0 is returned; -EINVAL if no valid
 * address could be found.
 */
static int __devinit tg3_get_device_address(struct tg3 *tp)
{
        struct net_device *dev = tp->dev;
        u32 hi, lo, mac_offset;
        int addr_ok = 0;

#ifdef CONFIG_SPARC
        if (!tg3_get_macaddr_sparc(tp))
                return 0;
#endif

        /* Select the NVRAM offset of the MAC address for this chip and,
         * on multi-function devices, for this PCI function.
         */
        mac_offset = 0x7c;
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
            tg3_flag(tp, 5780_CLASS)) {
                if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
                        mac_offset = 0xcc;
                /* Reset the NVRAM interface if we cannot take the lock;
                 * otherwise just drop the lock again.
                 */
                if (tg3_nvram_lock(tp))
                        tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
                else
                        tg3_nvram_unlock(tp);
        } else if (tg3_flag(tp, 5717_PLUS)) {
                if (tp->pci_fn & 1)
                        mac_offset = 0xcc;
                if (tp->pci_fn > 1)
                        mac_offset += 0x18c;
        } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
                mac_offset = 0x10;

        /* First try to get it from MAC address mailbox. */
        tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
        /* Mailbox contents are only trusted when the 0x484b signature is
         * present in the upper 16 bits.
         */
        if ((hi >> 16) == 0x484b) {
                dev->dev_addr[0] = (hi >>  8) & 0xff;
                dev->dev_addr[1] = (hi >>  0) & 0xff;

                tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
                dev->dev_addr[2] = (lo >> 24) & 0xff;
                dev->dev_addr[3] = (lo >> 16) & 0xff;
                dev->dev_addr[4] = (lo >>  8) & 0xff;
                dev->dev_addr[5] = (lo >>  0) & 0xff;

                /* Some old bootcode may report a 0 MAC address in SRAM */
                addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
        }
        if (!addr_ok) {
                /* Next, try NVRAM. */
                if (!tg3_flag(tp, NO_NVRAM) &&
                    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
                    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
                        /* NVRAM words are big-endian; the address starts
                         * two bytes into the first word.
                         */
                        memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
                        memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
                }
                /* Finally just fetch it out of the MAC control regs. */
                else {
                        hi = tr32(MAC_ADDR_0_HIGH);
                        lo = tr32(MAC_ADDR_0_LOW);

                        dev->dev_addr[5] = lo & 0xff;
                        dev->dev_addr[4] = (lo >> 8) & 0xff;
                        dev->dev_addr[3] = (lo >> 16) & 0xff;
                        dev->dev_addr[2] = (lo >> 24) & 0xff;
                        dev->dev_addr[1] = hi & 0xff;
                        dev->dev_addr[0] = (hi >> 8) & 0xff;
                }
        }

        if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
                if (!tg3_get_default_macaddr_sparc(tp))
                        return 0;
#endif
                return -EINVAL;
        }
        memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
        return 0;
}
14657
14658 #define BOUNDARY_SINGLE_CACHELINE       1
14659 #define BOUNDARY_MULTI_CACHELINE        2
14660
/* Compute the DMA read/write burst boundary bits for DMA_RW_CTRL from the
 * PCI cache line size and the host architecture's preference for single-
 * or multi-cacheline bursts.  @val holds the current register value; the
 * adjusted value is returned.  The boundary bits only matter on 5700/5701
 * and on PCI Express parts.
 */
static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
        int cacheline_size;
        u8 byte;
        int goal;

        /* PCI_CACHE_LINE_SIZE is in units of 4-byte dwords; 0 means the
         * size was never programmed, so assume the 1024-byte maximum.
         */
        pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
        if (byte == 0)
                cacheline_size = 1024;
        else
                cacheline_size = (int) byte * 4;

        /* On 5703 and later chips, the boundary bits have no
         * effect.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
            !tg3_flag(tp, PCI_EXPRESS))
                goto out;

#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
        goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
        goal = BOUNDARY_SINGLE_CACHELINE;
#else
        goal = 0;
#endif
#endif

        /* 57765+ only has a single cache-alignment disable bit. */
        if (tg3_flag(tp, 57765_PLUS)) {
                val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
                goto out;
        }

        if (!goal)
                goto out;

        /* PCI controllers on most RISC systems tend to disconnect
         * when a device tries to burst across a cache-line boundary.
         * Therefore, letting tg3 do so just wastes PCI bandwidth.
         *
         * Unfortunately, for PCI-E there are only limited
         * write-side controls for this, and thus for reads
         * we will still get the disconnects.  We'll also waste
         * these PCI cycles for both read and write for chips
         * other than 5700 and 5701 which do not implement the
         * boundary bits.
         */
        if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
                switch (cacheline_size) {
                case 16:
                case 32:
                case 64:
                case 128:
                        if (goal == BOUNDARY_SINGLE_CACHELINE) {
                                val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
                                        DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
                        } else {
                                val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
                                        DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
                        }
                        break;

                case 256:
                        val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
                                DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
                        break;

                default:
                        val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
                                DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
                        break;
                }
        } else if (tg3_flag(tp, PCI_EXPRESS)) {
                /* PCIe only exposes write-side boundary control. */
                switch (cacheline_size) {
                case 16:
                case 32:
                case 64:
                        if (goal == BOUNDARY_SINGLE_CACHELINE) {
                                val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
                                val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
                                break;
                        }
                        /* fallthrough */
                case 128:
                default:
                        val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
                        val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
                        break;
                }
        } else {
                /* Plain PCI: pick the boundary matching the cache line
                 * size for single-cacheline bursts, otherwise fall through
                 * to the next larger supported boundary.
                 */
                switch (cacheline_size) {
                case 16:
                        if (goal == BOUNDARY_SINGLE_CACHELINE) {
                                val |= (DMA_RWCTRL_READ_BNDRY_16 |
                                        DMA_RWCTRL_WRITE_BNDRY_16);
                                break;
                        }
                        /* fallthrough */
                case 32:
                        if (goal == BOUNDARY_SINGLE_CACHELINE) {
                                val |= (DMA_RWCTRL_READ_BNDRY_32 |
                                        DMA_RWCTRL_WRITE_BNDRY_32);
                                break;
                        }
                        /* fallthrough */
                case 64:
                        if (goal == BOUNDARY_SINGLE_CACHELINE) {
                                val |= (DMA_RWCTRL_READ_BNDRY_64 |
                                        DMA_RWCTRL_WRITE_BNDRY_64);
                                break;
                        }
                        /* fallthrough */
                case 128:
                        if (goal == BOUNDARY_SINGLE_CACHELINE) {
                                val |= (DMA_RWCTRL_READ_BNDRY_128 |
                                        DMA_RWCTRL_WRITE_BNDRY_128);
                                break;
                        }
                        /* fallthrough */
                case 256:
                        val |= (DMA_RWCTRL_READ_BNDRY_256 |
                                DMA_RWCTRL_WRITE_BNDRY_256);
                        break;
                case 512:
                        val |= (DMA_RWCTRL_READ_BNDRY_512 |
                                DMA_RWCTRL_WRITE_BNDRY_512);
                        break;
                case 1024:
                default:
                        val |= (DMA_RWCTRL_READ_BNDRY_1024 |
                                DMA_RWCTRL_WRITE_BNDRY_1024);
                        break;
                }
        }

out:
        return val;
}
14801
/* Perform one test DMA transfer of @size bytes between the host buffer at
 * @buf_dma and NIC-internal memory, driven by an internal buffer
 * descriptor written into NIC SRAM.  @to_device non-zero runs the read
 * DMA engine (NIC reads host memory); zero runs the write DMA engine
 * (NIC writes host memory).  Polls the corresponding completion FIFO and
 * returns 0 on completion, -ENODEV on timeout (~4ms).
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
        struct tg3_internal_buffer_desc test_desc;
        u32 sram_dma_descs;
        int i, ret;

        sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

        /* Clear completion FIFOs and DMA engine status before starting. */
        tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
        tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
        tw32(RDMAC_STATUS, 0);
        tw32(WDMAC_STATUS, 0);

        tw32(BUFMGR_MODE, 0);
        tw32(FTQ_RESET, 0);

        /* Build the internal descriptor: host address, NIC-side buffer
         * (0x2100 matches the SRAM offset read back by tg3_test_dma's
         * disabled validation block), and transfer length.
         */
        test_desc.addr_hi = ((u64) buf_dma) >> 32;
        test_desc.addr_lo = buf_dma & 0xffffffff;
        test_desc.nic_mbuf = 0x00002100;
        test_desc.len = size;

        /*
         * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
         * the *second* time the tg3 driver was getting loaded after an
         * initial scan.
         *
         * Broadcom tells me:
         *   ...the DMA engine is connected to the GRC block and a DMA
         *   reset may affect the GRC block in some unpredictable way...
         *   The behavior of resets to individual blocks has not been tested.
         *
         * Broadcom noted the GRC reset will also reset all sub-components.
         */
        if (to_device) {
                /* Queue ids for the read DMA path (presumably completion
                 * and submission queues — encoding not documented here).
                 */
                test_desc.cqid_sqid = (13 << 8) | 2;

                tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
                udelay(40);
        } else {
                /* Queue ids for the write DMA path. */
                test_desc.cqid_sqid = (16 << 8) | 7;

                tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
                udelay(40);
        }
        test_desc.flags = 0x00000005;

        /* Copy the descriptor word-by-word into NIC SRAM through the
         * PCI memory window.
         */
        for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
                u32 val;

                val = *(((u32 *)&test_desc) + i);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
                                       sram_dma_descs + (i * sizeof(u32)));
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
        }
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

        /* Kick off the transfer by enqueueing the descriptor address. */
        if (to_device)
                tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
        else
                tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

        /* Poll for the descriptor to appear on the completion FIFO:
         * up to 40 * 100us = 4ms.
         */
        ret = -ENODEV;
        for (i = 0; i < 40; i++) {
                u32 val;

                if (to_device)
                        val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
                else
                        val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
                if ((val & 0xffff) == sram_dma_descs) {
                        ret = 0;
                        break;
                }

                udelay(100);
        }

        return ret;
}
14881
14882 #define TEST_BUFFER_SIZE        0x2000
14883
/* Host bridges known to expose the 5700/5701 write DMA bug even though
 * the DMA test passes; tg3_test_dma() forces the 16-byte write boundary
 * when one of these is present.
 */
static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
        { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
        { },
};
14888
/* Derive and program the DMA_RW_CTRL value for this chip and host bus,
 * then, on 5700/5701 only, run a write/read DMA loopback against a test
 * buffer to detect the write DMA bug; if corruption is seen, fall back
 * to a 16-byte write boundary.  Returns 0 on success, -ENOMEM if the
 * test buffer cannot be allocated, or the tg3_do_test_dma() error.
 */
static int __devinit tg3_test_dma(struct tg3 *tp)
{
        dma_addr_t buf_dma;
        u32 *buf, saved_dma_rwctrl;
        int ret = 0;

        buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
                                 &buf_dma, GFP_KERNEL);
        if (!buf) {
                ret = -ENOMEM;
                goto out_nofree;
        }

        /* Start from the PCI read/write command defaults, then add the
         * architecture/cacheline-dependent boundary bits.
         */
        tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
                          (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

        tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

        if (tg3_flag(tp, 57765_PLUS))
                goto out;

        /* Chip/bus-specific watermark bits. */
        if (tg3_flag(tp, PCI_EXPRESS)) {
                /* DMA read watermark not used on PCIE */
                tp->dma_rwctrl |= 0x00180000;
        } else if (!tg3_flag(tp, PCIX_MODE)) {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
                        tp->dma_rwctrl |= 0x003f0000;
                else
                        tp->dma_rwctrl |= 0x003f000f;
        } else {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
                        u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
                        u32 read_water = 0x7;

                        /* If the 5704 is behind the EPB bridge, we can
                         * do the less restrictive ONE_DMA workaround for
                         * better performance.
                         */
                        if (tg3_flag(tp, 40BIT_DMA_BUG) &&
                            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
                                tp->dma_rwctrl |= 0x8000;
                        else if (ccval == 0x6 || ccval == 0x7)
                                tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

                        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
                                read_water = 4;
                        /* Set bit 23 to enable PCIX hw bug fix */
                        tp->dma_rwctrl |=
                                (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
                                (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
                                (1 << 23);
                } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
                        /* 5780 always in PCIX mode */
                        tp->dma_rwctrl |= 0x00144000;
                } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
                        /* 5714 always in PCIX mode */
                        tp->dma_rwctrl |= 0x00148000;
                } else {
                        tp->dma_rwctrl |= 0x001b000f;
                }
        }

        /* Clear the low nibble on 5703/5704. */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
                tp->dma_rwctrl &= 0xfffffff0;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                /* Remove this if it causes problems for some boards. */
                tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

                /* On 5700/5701 chips, we need to set this bit.
                 * Otherwise the chip will issue cacheline transactions
                 * to streamable DMA memory with not all the byte
                 * enables turned on.  This is an error on several
                 * RISC PCI controllers, in particular sparc64.
                 *
                 * On 5703/5704 chips, this bit has been reassigned
                 * a different meaning.  In particular, it is used
                 * on those chips to enable a PCI-X workaround.
                 */
                tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
        }

        tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
        /* Unneeded, already done by tg3_get_invariants.  */
        tg3_switch_clocks(tp);
#endif

        /* Only 5700/5701 need the actual loopback test below. */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
                goto out;

        /* It is best to perform DMA test with maximum write burst size
         * to expose the 5700/5701 write DMA bug.
         */
        saved_dma_rwctrl = tp->dma_rwctrl;
        tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
        tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

        /* Loop: on first corruption, switch to the 16-byte boundary and
         * retry; a second corruption with the 16-byte boundary is fatal.
         */
        while (1) {
                u32 *p = buf, i;

                /* Fill the buffer with a known pattern. */
                for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
                        p[i] = i;

                /* Send the buffer to the chip. */
                ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
                if (ret) {
                        dev_err(&tp->pdev->dev,
                                "%s: Buffer write failed. err = %d\n",
                                __func__, ret);
                        break;
                }

#if 0
                /* validate data reached card RAM correctly. */
                for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
                        u32 val;
                        tg3_read_mem(tp, 0x2100 + (i*4), &val);
                        if (le32_to_cpu(val) != p[i]) {
                                dev_err(&tp->pdev->dev,
                                        "%s: Buffer corrupted on device! "
                                        "(%d != %d)\n", __func__, val, i);
                                /* ret = -ENODEV here? */
                        }
                        p[i] = 0;
                }
#endif
                /* Now read it back. */
                ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
                if (ret) {
                        dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
                                "err = %d\n", __func__, ret);
                        break;
                }

                /* Verify it. */
                for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
                        if (p[i] == i)
                                continue;

                        if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
                            DMA_RWCTRL_WRITE_BNDRY_16) {
                                tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
                                tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
                                tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
                                break;
                        } else {
                                dev_err(&tp->pdev->dev,
                                        "%s: Buffer corrupted on read back! "
                                        "(%d != %d)\n", __func__, p[i], i);
                                ret = -ENODEV;
                                goto out;
                        }
                }

                if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
                        /* Success. */
                        ret = 0;
                        break;
                }
        }
        if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
            DMA_RWCTRL_WRITE_BNDRY_16) {
                /* DMA test passed without adjusting DMA boundary,
                 * now look for chipsets that are known to expose the
                 * DMA bug without failing the test.
                 */
                if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
                        tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
                        tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
                } else {
                        /* Safe to use the calculated DMA boundary. */
                        tp->dma_rwctrl = saved_dma_rwctrl;
                }

                tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
        }

out:
        dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
        return ret;
}
15078
15079 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15080 {
15081         if (tg3_flag(tp, 57765_PLUS)) {
15082                 tp->bufmgr_config.mbuf_read_dma_low_water =
15083                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15084                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15085                         DEFAULT_MB_MACRX_LOW_WATER_57765;
15086                 tp->bufmgr_config.mbuf_high_water =
15087                         DEFAULT_MB_HIGH_WATER_57765;
15088
15089                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15090                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15091                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15092                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15093                 tp->bufmgr_config.mbuf_high_water_jumbo =
15094                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
15095         } else if (tg3_flag(tp, 5705_PLUS)) {
15096                 tp->bufmgr_config.mbuf_read_dma_low_water =
15097                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15098                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15099                         DEFAULT_MB_MACRX_LOW_WATER_5705;
15100                 tp->bufmgr_config.mbuf_high_water =
15101                         DEFAULT_MB_HIGH_WATER_5705;
15102                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15103                         tp->bufmgr_config.mbuf_mac_rx_low_water =
15104                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
15105                         tp->bufmgr_config.mbuf_high_water =
15106                                 DEFAULT_MB_HIGH_WATER_5906;
15107                 }
15108
15109                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15110                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15111                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15112                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15113                 tp->bufmgr_config.mbuf_high_water_jumbo =
15114                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15115         } else {
15116                 tp->bufmgr_config.mbuf_read_dma_low_water =
15117                         DEFAULT_MB_RDMA_LOW_WATER;
15118                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15119                         DEFAULT_MB_MACRX_LOW_WATER;
15120                 tp->bufmgr_config.mbuf_high_water =
15121                         DEFAULT_MB_HIGH_WATER;
15122
15123                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15124                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15125                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15126                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15127                 tp->bufmgr_config.mbuf_high_water_jumbo =
15128                         DEFAULT_MB_HIGH_WATER_JUMBO;
15129         }
15130
15131         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15132         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15133 }
15134
15135 static char * __devinit tg3_phy_string(struct tg3 *tp)
15136 {
15137         switch (tp->phy_id & TG3_PHY_ID_MASK) {
15138         case TG3_PHY_ID_BCM5400:        return "5400";
15139         case TG3_PHY_ID_BCM5401:        return "5401";
15140         case TG3_PHY_ID_BCM5411:        return "5411";
15141         case TG3_PHY_ID_BCM5701:        return "5701";
15142         case TG3_PHY_ID_BCM5703:        return "5703";
15143         case TG3_PHY_ID_BCM5704:        return "5704";
15144         case TG3_PHY_ID_BCM5705:        return "5705";
15145         case TG3_PHY_ID_BCM5750:        return "5750";
15146         case TG3_PHY_ID_BCM5752:        return "5752";
15147         case TG3_PHY_ID_BCM5714:        return "5714";
15148         case TG3_PHY_ID_BCM5780:        return "5780";
15149         case TG3_PHY_ID_BCM5755:        return "5755";
15150         case TG3_PHY_ID_BCM5787:        return "5787";
15151         case TG3_PHY_ID_BCM5784:        return "5784";
15152         case TG3_PHY_ID_BCM5756:        return "5722/5756";
15153         case TG3_PHY_ID_BCM5906:        return "5906";
15154         case TG3_PHY_ID_BCM5761:        return "5761";
15155         case TG3_PHY_ID_BCM5718C:       return "5718C";
15156         case TG3_PHY_ID_BCM5718S:       return "5718S";
15157         case TG3_PHY_ID_BCM57765:       return "57765";
15158         case TG3_PHY_ID_BCM5719C:       return "5719C";
15159         case TG3_PHY_ID_BCM5720C:       return "5720C";
15160         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
15161         case 0:                 return "serdes";
15162         default:                return "unknown";
15163         }
15164 }
15165
15166 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15167 {
15168         if (tg3_flag(tp, PCI_EXPRESS)) {
15169                 strcpy(str, "PCI Express");
15170                 return str;
15171         } else if (tg3_flag(tp, PCIX_MODE)) {
15172                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15173
15174                 strcpy(str, "PCIX:");
15175
15176                 if ((clock_ctrl == 7) ||
15177                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15178                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15179                         strcat(str, "133MHz");
15180                 else if (clock_ctrl == 0)
15181                         strcat(str, "33MHz");
15182                 else if (clock_ctrl == 2)
15183                         strcat(str, "50MHz");
15184                 else if (clock_ctrl == 4)
15185                         strcat(str, "66MHz");
15186                 else if (clock_ctrl == 6)
15187                         strcat(str, "100MHz");
15188         } else {
15189                 strcpy(str, "PCI:");
15190                 if (tg3_flag(tp, PCI_HIGH_SPEED))
15191                         strcat(str, "66MHz");
15192                 else
15193                         strcat(str, "33MHz");
15194         }
15195         if (tg3_flag(tp, PCI_32BIT))
15196                 strcat(str, ":32-bit");
15197         else
15198                 strcat(str, ":64-bit");
15199         return str;
15200 }
15201
/* Locate the other PCI function sharing this device's slot (the peer of
 * a dual-port part such as the 5704).  Returns tp->pdev itself when the
 * device is configured single-port.  The returned device's refcount is
 * deliberately not left elevated -- see the comment below.
 */
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
        struct pci_dev *peer;
        unsigned int func, devnr = tp->pdev->devfn & ~7;

        /* Scan all 8 functions of our slot for a device other than
         * ourselves.  pci_get_slot() takes a reference (or returns NULL);
         * non-matches are dropped immediately (pci_dev_put(NULL) is a
         * no-op), and a match breaks out still holding its reference.
         */
        for (func = 0; func < 8; func++) {
                peer = pci_get_slot(tp->pdev->bus, devnr | func);
                if (peer && peer != tp->pdev)
                        break;
                pci_dev_put(peer);
        }
        /* 5704 can be configured in single-port mode, set peer to
         * tp->pdev in that case.
         */
        if (!peer) {
                peer = tp->pdev;
                return peer;
        }

        /*
         * We don't need to keep the refcount elevated; there's no way
         * to remove one half of this device without removing the other
         */
        pci_dev_put(peer);

        return peer;
}
15229
15230 static void __devinit tg3_init_coal(struct tg3 *tp)
15231 {
15232         struct ethtool_coalesce *ec = &tp->coal;
15233
15234         memset(ec, 0, sizeof(*ec));
15235         ec->cmd = ETHTOOL_GCOALESCE;
15236         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15237         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15238         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15239         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15240         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15241         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15242         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15243         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15244         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15245
15246         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15247                                  HOSTCC_MODE_CLRTICK_TXBD)) {
15248                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15249                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15250                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15251                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15252         }
15253
15254         if (tg3_flag(tp, 5705_PLUS)) {
15255                 ec->rx_coalesce_usecs_irq = 0;
15256                 ec->tx_coalesce_usecs_irq = 0;
15257                 ec->stats_block_coalesce_usecs = 0;
15258         }
15259 }
15260
/* Standard net_device callbacks for the tg3 driver.  The same ops
 * table is used for all supported chips; per-chip behavior is
 * handled inside the individual handlers via tg3 flags.
 */
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
15278
/* PCI probe routine: bring the device to D0, map its register BARs,
 * determine chip capabilities, configure DMA masks and offload
 * features, set up per-vector mailbox addresses, and register the
 * net_device.  On any failure, resources acquired so far are
 * released via the goto-cleanup chain at the bottom.
 *
 * Returns 0 on success or a negative errno.
 */
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pm_cap;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	netdev_features_t features = 0;

	/* Print the driver version banner only once per boot. */
	printk_once(KERN_INFO "%s\n", version);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find Power Management capability, aborting\n");
		err = -EIO;
		goto err_out_free_res;
	}

	/* Make sure the chip is in full-power state before touching it. */
	err = pci_set_power_state(pdev, PCI_D0);
	if (err) {
		dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
		goto err_out_free_res;
	}

	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
		err = -ENOMEM;
		goto err_out_power_down;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;

	/* Module parameter tg3_debug overrides the default message mask. */
	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	/* These chips carry an Application Processing Engine (APE)
	 * whose registers live behind BAR 2 and must be mapped too.
	 */
	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
		tg3_flag_set(tp, ENABLE_APE);
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			dev_err(&pdev->dev,
				"Cannot map APE registers, aborting\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}
	}

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->netdev_ops = &tg3_netdev_ops;
	dev->irq = pdev->irq;

	/* Identify the chip and populate capability flags. */
	err = tg3_get_invariants(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_apeunmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_apeunmap;
			}
		}
	}
	/* Fall back to a 32-bit mask if the wide mask was refused. */
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_apeunmap;
		}
	}

	tg3_init_bufmgr_config(tp);

	features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}

	dev->features |= features;
	dev->vlan_features |= features;

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
	 * loopback for the remaining devices.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;

	/* 5705 A1 without TSO on a slow bus is limited to a shallow
	 * rx ring.
	 */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shutdown
	 * DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	/* Assign interrupt, consumer, and producer mailbox addresses to
	 * each NAPI context.
	 */
	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		/* NOTE(review): interrupt mailboxes are 8 bytes apart for
		 * the first vectors and 4 bytes apart after that — matches
		 * the chip's mailbox register layout; confirm against the
		 * register map in tg3.h when changing irq_max.
		 */
		if (i <= 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we setup
		 * above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	if (tg3_flag(tp, 5717_PLUS)) {
		/* Resume a low-power mode */
		tg3_frob_aux_power(tp, false);
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tp->pci_chip_rev_id,
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		struct phy_device *phydev;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		netdev_info(dev,
			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
			    phydev->drv->name, dev_name(&phydev->dev));
	} else {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

	/* Snapshot config space for later restore in error recovery. */
	pci_save_state(pdev);

	return 0;

/* Error cleanup chain: each label releases what was acquired before
 * the failure point, in reverse order of acquisition.
 */
err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_power_down:
	pci_set_power_state(pdev, PCI_D3hot);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
15656
/* PCI remove routine: undo everything tg3_init_one() did, in reverse
 * order — cancel the reset worker, tear down PHY/MDIO state, unregister
 * the net_device, unmap BARs, and release PCI resources.
 */
static void __devexit tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		if (tp->fw)
			release_firmware(tp->fw);

		/* Make sure the deferred reset task is not running. */
		tg3_reset_task_cancel(tp);

		if (tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
15689
15690 #ifdef CONFIG_PM_SLEEP
/* System-sleep suspend callback: quiesce the interface (stop NAPI,
 * timer and interrupts), halt the chip, and prepare it for power-down.
 * If power-down preparation fails, the hardware is restarted so the
 * device is left usable.  Returns 0 on success or a negative errno.
 */
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* Nothing to quiesce if the interface is down. */
	if (!netif_running(dev))
		return 0;

	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		/* Power-down failed: bring the device back up so it
		 * remains functional, then propagate the original error.
		 */
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}
15744
/* System-sleep resume callback: reattach the interface and restart
 * the hardware, timer, NAPI and PHY if the interface was running at
 * suspend time.  Returns 0 on success or a negative errno.
 */
static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* Interface was down across suspend; nothing to restart. */
	if (!netif_running(dev))
		return 0;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	/* Start the PHY outside the full lock. */
	if (!err)
		tg3_phy_start(tp);

	return err;
}
15777
/* Register suspend/resume handlers only when the kernel supports
 * system sleep; otherwise the driver advertises no PM ops.
 */
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
#define TG3_PM_OPS (&tg3_pm_ops)

#else

#define TG3_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */
15786
15787 /**
15788  * tg3_io_error_detected - called when PCI error is detected
15789  * @pdev: Pointer to PCI device
15790  * @state: The current pci connection state
15791  *
15792  * This function is called after a PCI bus error affecting
15793  * this device has been detected.
15794  */
15795 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15796                                               pci_channel_state_t state)
15797 {
15798         struct net_device *netdev = pci_get_drvdata(pdev);
15799         struct tg3 *tp = netdev_priv(netdev);
15800         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15801
15802         netdev_info(netdev, "PCI I/O error detected\n");
15803
15804         rtnl_lock();
15805
15806         if (!netif_running(netdev))
15807                 goto done;
15808
15809         tg3_phy_stop(tp);
15810
15811         tg3_netif_stop(tp);
15812
15813         del_timer_sync(&tp->timer);
15814
15815         /* Want to make sure that the reset task doesn't run */
15816         tg3_reset_task_cancel(tp);
15817         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
15818
15819         netif_device_detach(netdev);
15820
15821         /* Clean up software state, even if MMIO is blocked */
15822         tg3_full_lock(tp, 0);
15823         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
15824         tg3_full_unlock(tp);
15825
15826 done:
15827         if (state == pci_channel_io_perm_failure)
15828                 err = PCI_ERS_RESULT_DISCONNECT;
15829         else
15830                 pci_disable_device(pdev);
15831
15832         rtnl_unlock();
15833
15834         return err;
15835 }
15836
15837 /**
15838  * tg3_io_slot_reset - called after the pci bus has been reset.
15839  * @pdev: Pointer to PCI device
15840  *
15841  * Restart the card from scratch, as if from a cold-boot.
15842  * At this point, the card has exprienced a hard reset,
15843  * followed by fixups by BIOS, and has its config space
15844  * set up identically to what it was at cold boot.
15845  */
15846 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
15847 {
15848         struct net_device *netdev = pci_get_drvdata(pdev);
15849         struct tg3 *tp = netdev_priv(netdev);
15850         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15851         int err;
15852
15853         rtnl_lock();
15854
15855         if (pci_enable_device(pdev)) {
15856                 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
15857                 goto done;
15858         }
15859
15860         pci_set_master(pdev);
15861         pci_restore_state(pdev);
15862         pci_save_state(pdev);
15863
15864         if (!netif_running(netdev)) {
15865                 rc = PCI_ERS_RESULT_RECOVERED;
15866                 goto done;
15867         }
15868
15869         err = tg3_power_up(tp);
15870         if (err)
15871                 goto done;
15872
15873         rc = PCI_ERS_RESULT_RECOVERED;
15874
15875 done:
15876         rtnl_unlock();
15877
15878         return rc;
15879 }
15880
15881 /**
15882  * tg3_io_resume - called when traffic can start flowing again.
15883  * @pdev: Pointer to PCI device
15884  *
15885  * This callback is called when the error recovery driver tells
15886  * us that its OK to resume normal operation.
15887  */
15888 static void tg3_io_resume(struct pci_dev *pdev)
15889 {
15890         struct net_device *netdev = pci_get_drvdata(pdev);
15891         struct tg3 *tp = netdev_priv(netdev);
15892         int err;
15893
15894         rtnl_lock();
15895
15896         if (!netif_running(netdev))
15897                 goto done;
15898
15899         tg3_full_lock(tp, 0);
15900         tg3_flag_set(tp, INIT_COMPLETE);
15901         err = tg3_restart_hw(tp, 1);
15902         tg3_full_unlock(tp);
15903         if (err) {
15904                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
15905                 goto done;
15906         }
15907
15908         netif_device_attach(netdev);
15909
15910         tp->timer.expires = jiffies + tp->timer_offset;
15911         add_timer(&tp->timer);
15912
15913         tg3_netif_start(tp);
15914
15915         tg3_phy_start(tp);
15916
15917 done:
15918         rtnl_unlock();
15919 }
15920
/* PCI AER (Advanced Error Reporting) recovery callbacks. */
static struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};
15926
/* PCI driver definition tying together probe/remove, the device ID
 * table, error recovery handlers, and power-management ops.
 */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.err_handler	= &tg3_err_handler,
	.driver.pm	= TG3_PM_OPS,
};
15935
/* Module entry point: register the PCI driver with the core. */
static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}
15940
/* Module exit point: unregister the PCI driver (removes all devices). */
static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}
15945
/* Hook the init/exit functions into the module load/unload path. */
module_init(tg3_init);
module_exit(tg3_cleanup);