/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2011 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
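
/* Example usage: tg3_flag(tp, ENABLE_APE) tests TG3_FLAG_ENABLE_APE in
 * tp->tg3_flags; tg3_flag_set() and tg3_flag_clear() update the same bit
 * atomically through the bitops wrappers above.
 */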

#define DRV_MODULE_NAME         "tg3"
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     122
#define DRV_MODULE_VERSION      \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE      "December 7, 2011"

#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY      100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
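
/* NEXT_TX() uses the '& (TG3_TX_RING_SIZE - 1)' mask form of
 * '% TG3_TX_RING_SIZE' described in the comment above; this is only
 * valid because TG3_TX_RING_SIZE is a power of two.
 */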

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K            2048
#define TG3_TX_BD_DMA_MAX_4K            4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC       5

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        { "nvram test        (online) " },
        { "link test         (online) " },
        { "register test     (offline)" },
        { "memory test       (offline)" },
        { "mac loopback test (offline)" },
        { "phy loopback test (offline)" },
        { "ext loopback test (offline)" },
        { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example, when the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another, when the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)
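
/* tw32_f() flushes a posted write by reading the register back, while
 * tw32_wait_f() additionally delays for 'us' microseconds after the write
 * and again after the flushing read, as implemented in _tw32_flush() above.
 */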

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase, bit;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver doesn't hold any stale locks. */
        for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
                switch (i) {
                case TG3_APE_LOCK_PHY0:
                case TG3_APE_LOCK_PHY1:
                case TG3_APE_LOCK_PHY2:
                case TG3_APE_LOCK_PHY3:
                        bit = APE_LOCK_GRANT_DRIVER;
                        break;
                default:
                        if (!tp->pci_fn)
                                bit = APE_LOCK_GRANT_DRIVER;
                        else
                                bit = 1 << tp->pci_fn;
                }
                tg3_ape_write32(tp, regbase + 4 * i, bit);
        }
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return 0;
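                /* fall through */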
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_REQ_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        default:
                return -EINVAL;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return;
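                /* fall through */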
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_GRANT_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        default:
                return;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int i;
        u32 apedata;

        /* NCSI does not support APE events */
        if (tg3_flag(tp, APE_HAS_NCSI))
                return;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return;

        /* Wait for up to 1 millisecond for APE to service previous event. */
        for (i = 0; i < 10; i++) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                                        event | APE_EVENT_STATUS_EVENT_PENDING);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(100);
        }

        if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
        u32 event;
        u32 apedata;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (kind) {
        case RESET_KIND_INIT:
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
                                APE_HOST_SEG_SIG_MAGIC);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
                                APE_HOST_SEG_LEN_MAGIC);
                apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
                tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
                tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
                        APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
                tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
                                APE_HOST_BEHAV_NO_PHYLOCK);
                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
                                    TG3_APE_HOST_DRVR_STATE_START);

                event = APE_EVENT_STATUS_STATE_START;
                break;
        case RESET_KIND_SHUTDOWN:
                /* With the interface we are currently using,
                 * APE does not track driver state.  Wiping
                 * out the HOST SEGMENT SIGNATURE forces
                 * the APE to assume OS absent status.
                 */
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

                if (device_may_wakeup(&tp->pdev->dev) &&
                    tg3_flag(tp, WOL_ENABLE)) {
                        tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
                                            TG3_APE_HOST_WOL_SPEED_AUTO);
                        apedata = TG3_APE_HOST_DRVR_STATE_WOL;
                } else
                        apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

                event = APE_EVENT_STATUS_STATE_UNLOAD;
                break;
        case RESET_KIND_SUSPEND:
                event = APE_EVENT_STATUS_STATE_SUSPEND;
                break;
        default:
                return;
        }

        event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

        tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }
        /* check for RX/TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000
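
/* Each polling iteration below waits 10 usec, so PHY_BUSY_LOOPS bounds a
 * single MDIO read or write to roughly 5000 * 10 usec = 50 msec.
 */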

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        *val = 0x0;

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
                           (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
                           MII_TG3_AUXCTL_SHDWSEL_MISC);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

        return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
        if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
                set |= MII_TG3_AUXCTL_MISC_WREN;

        return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)
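
/* The pair above differs only in MII_TG3_AUXCTL_ACTL_SMDSP_ENA: the
 * enable macro sets it along with ACTL_TX_6DB, the disable macro writes
 * ACTL_TX_6DB alone, clearing the SMDSP enable bit.
 */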

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
        struct tg3 *tp = bp->priv;
        u32 val;

        spin_lock_bh(&tp->lock);

        if (tg3_readphy(tp, reg, &val))
                val = -EIO;

        spin_unlock_bh(&tp->lock);

        return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
        struct tg3 *tp = bp->priv;
        u32 ret = 0;

        spin_lock_bh(&tp->lock);

        if (tg3_writephy(tp, reg, val))
                ret = -EIO;

        spin_unlock_bh(&tp->lock);

        return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
        return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
        u32 val;
        struct phy_device *phydev;

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                val = MAC_PHYCFG2_50610_LED_MODES;
                break;
        case PHY_ID_BCMAC131:
                val = MAC_PHYCFG2_AC131_LED_MODES;
                break;
        case PHY_ID_RTL8211C:
                val = MAC_PHYCFG2_RTL8211C_LED_MODES;
                break;
        case PHY_ID_RTL8201E:
                val = MAC_PHYCFG2_RTL8201E_LED_MODES;
                break;
        default:
                return;
        }

        if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
                tw32(MAC_PHYCFG2, val);

                val = tr32(MAC_PHYCFG1);
                val &= ~(MAC_PHYCFG1_RGMII_INT |
                         MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
                val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
                tw32(MAC_PHYCFG1, val);

                return;
        }

        if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
                val |= MAC_PHYCFG2_EMODE_MASK_MASK |
                       MAC_PHYCFG2_FMODE_MASK_MASK |
                       MAC_PHYCFG2_GMODE_MASK_MASK |
                       MAC_PHYCFG2_ACT_MASK_MASK   |
                       MAC_PHYCFG2_QUAL_MASK_MASK |
                       MAC_PHYCFG2_INBAND_ENABLE;

        tw32(MAC_PHYCFG2, val);

        val = tr32(MAC_PHYCFG1);
        val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
                 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
        }
        val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
               MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
        tw32(MAC_PHYCFG1, val);

        val = tr32(MAC_EXT_RGMII_MODE);
        val &= ~(MAC_RGMII_MODE_RX_INT_B |
                 MAC_RGMII_MODE_RX_QUALITY |
                 MAC_RGMII_MODE_RX_ACTIVITY |
                 MAC_RGMII_MODE_RX_ENG_DET |
                 MAC_RGMII_MODE_TX_ENABLE |
                 MAC_RGMII_MODE_TX_LOWPWR |
                 MAC_RGMII_MODE_TX_RESET);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_RGMII_MODE_RX_INT_B |
                               MAC_RGMII_MODE_RX_QUALITY |
                               MAC_RGMII_MODE_RX_ACTIVITY |
                               MAC_RGMII_MODE_RX_ENG_DET;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_RGMII_MODE_TX_ENABLE |
                               MAC_RGMII_MODE_TX_LOWPWR |
                               MAC_RGMII_MODE_TX_RESET;
        }
        tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
        tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);

        if (tg3_flag(tp, MDIOBUS_INITED) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
        int i;
        u32 reg;
        struct phy_device *phydev;

        if (tg3_flag(tp, 5717_PLUS)) {
                u32 is_serdes;

                tp->phy_addr = tp->pci_fn + 1;

                if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
                        is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
                else
                        is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
                                    TG3_CPMU_PHY_STRAP_IS_SERDES;
                if (is_serdes)
                        tp->phy_addr += 7;
        } else
                tp->phy_addr = TG3_PHY_MII_ADDR;

        tg3_mdio_start(tp);

        if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
                return 0;

        tp->mdio_bus = mdiobus_alloc();
        if (tp->mdio_bus == NULL)
                return -ENOMEM;

        tp->mdio_bus->name     = "tg3 mdio bus";
        snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
                 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
        tp->mdio_bus->priv     = tp;
        tp->mdio_bus->parent   = &tp->pdev->dev;
        tp->mdio_bus->read     = &tg3_mdio_read;
        tp->mdio_bus->write    = &tg3_mdio_write;
        tp->mdio_bus->reset    = &tg3_mdio_reset;
        tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
        tp->mdio_bus->irq      = &tp->mdio_irq[0];

        for (i = 0; i < PHY_MAX_ADDR; i++)
                tp->mdio_bus->irq[i] = PHY_POLL;

        /* The bus registration will look for all the PHYs on the mdio bus.
         * Unfortunately, it does not ensure the PHY is powered up before
         * accessing the PHY ID registers.  A chip reset is the
         * quickest way to bring the device back to an operational state.
1348          */
1349         if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1350                 tg3_bmcr_reset(tp);
1351
1352         i = mdiobus_register(tp->mdio_bus);
1353         if (i) {
1354                 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1355                 mdiobus_free(tp->mdio_bus);
1356                 return i;
1357         }
1358
1359         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1360
1361         if (!phydev || !phydev->drv) {
1362                 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1363                 mdiobus_unregister(tp->mdio_bus);
1364                 mdiobus_free(tp->mdio_bus);
1365                 return -ENODEV;
1366         }
1367
1368         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1369         case PHY_ID_BCM57780:
1370                 phydev->interface = PHY_INTERFACE_MODE_GMII;
1371                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1372                 break;
1373         case PHY_ID_BCM50610:
1374         case PHY_ID_BCM50610M:
1375                 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1376                                      PHY_BRCM_RX_REFCLK_UNUSED |
1377                                      PHY_BRCM_DIS_TXCRXC_NOENRGY |
1378                                      PHY_BRCM_AUTO_PWRDWN_ENABLE;
1379                 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1380                         phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1381                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1382                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1383                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1384                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1385                 /* fallthru */
1386         case PHY_ID_RTL8211C:
1387                 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1388                 break;
1389         case PHY_ID_RTL8201E:
1390         case PHY_ID_BCMAC131:
1391                 phydev->interface = PHY_INTERFACE_MODE_MII;
1392                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1393                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1394                 break;
1395         }
1396
1397         tg3_flag_set(tp, MDIOBUS_INITED);
1398
1399         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1400                 tg3_mdio_config_5785(tp);
1401
1402         return 0;
1403 }
1404
1405 static void tg3_mdio_fini(struct tg3 *tp)
1406 {
1407         if (tg3_flag(tp, MDIOBUS_INITED)) {
1408                 tg3_flag_clear(tp, MDIOBUS_INITED);
1409                 mdiobus_unregister(tp->mdio_bus);
1410                 mdiobus_free(tp->mdio_bus);
1411         }
1412 }
1413
1414 /* tp->lock is held. */
1415 static inline void tg3_generate_fw_event(struct tg3 *tp)
1416 {
1417         u32 val;
1418
1419         val = tr32(GRC_RX_CPU_EVENT);
1420         val |= GRC_RX_CPU_DRIVER_EVENT;
1421         tw32_f(GRC_RX_CPU_EVENT, val);
1422
1423         tp->last_event_jiffies = jiffies;
1424 }
1425
1426 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1427
1428 /* tp->lock is held. */
1429 static void tg3_wait_for_event_ack(struct tg3 *tp)
1430 {
1431         int i;
1432         unsigned int delay_cnt;
1433         long time_remain;
1434
1435         /* If enough time has passed, no wait is necessary. */
1436         time_remain = (long)(tp->last_event_jiffies + 1 +
1437                       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1438                       (long)jiffies;
1439         if (time_remain < 0)
1440                 return;
1441
1442         /* Check if we can shorten the wait time. */
1443         delay_cnt = jiffies_to_usecs(time_remain);
1444         if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1445                 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1446         delay_cnt = (delay_cnt >> 3) + 1;
1447
1448         for (i = 0; i < delay_cnt; i++) {
1449                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1450                         break;
1451                 udelay(8);
1452         }
1453 }
1454
1455 /* tp->lock is held. */
1456 static void tg3_ump_link_report(struct tg3 *tp)
1457 {
1458         u32 reg;
1459         u32 val;
1460
1461         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1462                 return;
1463
1464         tg3_wait_for_event_ack(tp);
1465
1466         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1467
1468         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1469
1470         val = 0;
1471         if (!tg3_readphy(tp, MII_BMCR, &reg))
1472                 val = reg << 16;
1473         if (!tg3_readphy(tp, MII_BMSR, &reg))
1474                 val |= (reg & 0xffff);
1475         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
1476
1477         val = 0;
1478         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1479                 val = reg << 16;
1480         if (!tg3_readphy(tp, MII_LPA, &reg))
1481                 val |= (reg & 0xffff);
1482         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
1483
1484         val = 0;
1485         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1486                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1487                         val = reg << 16;
1488                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1489                         val |= (reg & 0xffff);
1490         }
1491         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
1492
1493         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1494                 val = reg << 16;
1495         else
1496                 val = 0;
1497         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
1498
1499         tg3_generate_fw_event(tp);
1500 }
1501
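/* For reference, tg3_ump_link_report() leaves the firmware command
 * mailbox laid out as follows before raising the event:
 *
 *   NIC_SRAM_FW_CMD_MBOX            FWCMD_NICDRV_LINK_UPDATE
 *   NIC_SRAM_FW_CMD_LEN_MBOX        14
 *   NIC_SRAM_FW_CMD_DATA_MBOX +  0  BMCR << 16      | BMSR
 *   NIC_SRAM_FW_CMD_DATA_MBOX +  4  ADVERTISE << 16 | LPA
 *   NIC_SRAM_FW_CMD_DATA_MBOX +  8  CTRL1000 << 16  | STAT1000
 *   NIC_SRAM_FW_CMD_DATA_MBOX + 12  PHYADDR << 16
 */
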
1502 /* tp->lock is held. */
1503 static void tg3_stop_fw(struct tg3 *tp)
1504 {
1505         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1506                 /* Wait for RX cpu to ACK the previous event. */
1507                 tg3_wait_for_event_ack(tp);
1508
1509                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1510
1511                 tg3_generate_fw_event(tp);
1512
1513                 /* Wait for RX cpu to ACK this event. */
1514                 tg3_wait_for_event_ack(tp);
1515         }
1516 }
1517
1518 /* tp->lock is held. */
1519 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1520 {
1521         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1522                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1523
1524         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1525                 switch (kind) {
1526                 case RESET_KIND_INIT:
1527                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1528                                       DRV_STATE_START);
1529                         break;
1530
1531                 case RESET_KIND_SHUTDOWN:
1532                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1533                                       DRV_STATE_UNLOAD);
1534                         break;
1535
1536                 case RESET_KIND_SUSPEND:
1537                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1538                                       DRV_STATE_SUSPEND);
1539                         break;
1540
1541                 default:
1542                         break;
1543                 }
1544         }
1545
1546         if (kind == RESET_KIND_INIT ||
1547             kind == RESET_KIND_SUSPEND)
1548                 tg3_ape_driver_state_change(tp, kind);
1549 }
1550
1551 /* tp->lock is held. */
1552 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1553 {
1554         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1555                 switch (kind) {
1556                 case RESET_KIND_INIT:
1557                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1558                                       DRV_STATE_START_DONE);
1559                         break;
1560
1561                 case RESET_KIND_SHUTDOWN:
1562                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1563                                       DRV_STATE_UNLOAD_DONE);
1564                         break;
1565
1566                 default:
1567                         break;
1568                 }
1569         }
1570
1571         if (kind == RESET_KIND_SHUTDOWN)
1572                 tg3_ape_driver_state_change(tp, kind);
1573 }
1574
1575 /* tp->lock is held. */
1576 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1577 {
1578         if (tg3_flag(tp, ENABLE_ASF)) {
1579                 switch (kind) {
1580                 case RESET_KIND_INIT:
1581                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1582                                       DRV_STATE_START);
1583                         break;
1584
1585                 case RESET_KIND_SHUTDOWN:
1586                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1587                                       DRV_STATE_UNLOAD);
1588                         break;
1589
1590                 case RESET_KIND_SUSPEND:
1591                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1592                                       DRV_STATE_SUSPEND);
1593                         break;
1594
1595                 default:
1596                         break;
1597                 }
1598         }
1599 }
1600
1601 static int tg3_poll_fw(struct tg3 *tp)
1602 {
1603         int i;
1604         u32 val;
1605
1606         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1607                 /* Wait up to 20ms for init done. */
1608                 for (i = 0; i < 200; i++) {
1609                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1610                                 return 0;
1611                         udelay(100);
1612                 }
1613                 return -ENODEV;
1614         }
1615
1616         /* Wait for firmware initialization to complete. */
1617         for (i = 0; i < 100000; i++) {
1618                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1619                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1620                         break;
1621                 udelay(10);
1622         }
1623
1624         /* Chip might not be fitted with firmware.  Some Sun onboard
1625          * parts are configured like that.  So don't signal the timeout
1626          * of the above loop as an error, but do report the lack of
1627          * running firmware once.
1628          */
1629         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1630                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1631
1632                 netdev_info(tp->dev, "No firmware running\n");
1633         }
1634
1635         if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
1636                 /* The 57765 A0 needs a little more settling
1637                  * time after the firmware handshake completes.
1638                  */
1639                 mdelay(10);
1640         }
1641
1642         return 0;
1643 }
1644
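/* The handshake tg3_poll_fw() waits on pairs with
 * tg3_write_sig_pre_reset() above: the driver deposits
 * NIC_SRAM_FIRMWARE_MBOX_MAGIC1 before the reset and the bootcode
 * writes back its one's complement once initialization completes.
 */
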
1645 static void tg3_link_report(struct tg3 *tp)
1646 {
1647         if (!netif_carrier_ok(tp->dev)) {
1648                 netif_info(tp, link, tp->dev, "Link is down\n");
1649                 tg3_ump_link_report(tp);
1650         } else if (netif_msg_link(tp)) {
1651                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1652                             (tp->link_config.active_speed == SPEED_1000 ?
1653                              1000 :
1654                              (tp->link_config.active_speed == SPEED_100 ?
1655                               100 : 10)),
1656                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1657                              "full" : "half"));
1658
1659                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1660                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1661                             "on" : "off",
1662                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1663                             "on" : "off");
1664
1665                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1666                         netdev_info(tp->dev, "EEE is %s\n",
1667                                     tp->setlpicnt ? "enabled" : "disabled");
1668
1669                 tg3_ump_link_report(tp);
1670         }
1671 }
1672
1673 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1674 {
1675         u16 miireg;
1676
1677         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1678                 miireg = ADVERTISE_1000XPAUSE;
1679         else if (flow_ctrl & FLOW_CTRL_TX)
1680                 miireg = ADVERTISE_1000XPSE_ASYM;
1681         else if (flow_ctrl & FLOW_CTRL_RX)
1682                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1683         else
1684                 miireg = 0;
1685
1686         return miireg;
1687 }
1688
1689 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1690 {
1691         u8 cap = 0;
1692
1693         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1694                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1695         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1696                 if (lcladv & ADVERTISE_1000XPAUSE)
1697                         cap = FLOW_CTRL_RX;
1698                 if (rmtadv & ADVERTISE_1000XPAUSE)
1699                         cap = FLOW_CTRL_TX;
1700         }
1701
1702         return cap;
1703 }
1704
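/* The resolution above follows the usual 1000BASE-X pause rules
 * (cf. IEEE 802.3 clause 37).  As a truth table, with P the PAUSE
 * advertisement bit and A the asymmetric-pause bit:
 *
 *   local P  local A  remote P  remote A   resolved
 *      1        x        1         x       TX + RX
 *      0        1        1         1       TX only
 *      1        1        0         1       RX only
 *   any other combination                  none
 */
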
1705 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1706 {
1707         u8 autoneg;
1708         u8 flowctrl = 0;
1709         u32 old_rx_mode = tp->rx_mode;
1710         u32 old_tx_mode = tp->tx_mode;
1711
1712         if (tg3_flag(tp, USE_PHYLIB))
1713                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1714         else
1715                 autoneg = tp->link_config.autoneg;
1716
1717         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1718                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1719                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1720                 else
1721                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1722         } else
1723                 flowctrl = tp->link_config.flowctrl;
1724
1725         tp->link_config.active_flowctrl = flowctrl;
1726
1727         if (flowctrl & FLOW_CTRL_RX)
1728                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1729         else
1730                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1731
1732         if (old_rx_mode != tp->rx_mode)
1733                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1734
1735         if (flowctrl & FLOW_CTRL_TX)
1736                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1737         else
1738                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1739
1740         if (old_tx_mode != tp->tx_mode)
1741                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1742 }
1743
1744 static void tg3_adjust_link(struct net_device *dev)
1745 {
1746         u8 oldflowctrl, linkmesg = 0;
1747         u32 mac_mode, lcl_adv, rmt_adv;
1748         struct tg3 *tp = netdev_priv(dev);
1749         struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1750
1751         spin_lock_bh(&tp->lock);
1752
1753         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1754                                     MAC_MODE_HALF_DUPLEX);
1755
1756         oldflowctrl = tp->link_config.active_flowctrl;
1757
1758         if (phydev->link) {
1759                 lcl_adv = 0;
1760                 rmt_adv = 0;
1761
1762                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1763                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1764                 else if (phydev->speed == SPEED_1000 ||
1765                          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1766                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
1767                 else
1768                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1769
1770                 if (phydev->duplex == DUPLEX_HALF)
1771                         mac_mode |= MAC_MODE_HALF_DUPLEX;
1772                 else {
1773                         lcl_adv = mii_advertise_flowctrl(
1774                                   tp->link_config.flowctrl);
1775
1776                         if (phydev->pause)
1777                                 rmt_adv = LPA_PAUSE_CAP;
1778                         if (phydev->asym_pause)
1779                                 rmt_adv |= LPA_PAUSE_ASYM;
1780                 }
1781
1782                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1783         } else
1784                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1785
1786         if (mac_mode != tp->mac_mode) {
1787                 tp->mac_mode = mac_mode;
1788                 tw32_f(MAC_MODE, tp->mac_mode);
1789                 udelay(40);
1790         }
1791
1792         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1793                 if (phydev->speed == SPEED_10)
1794                         tw32(MAC_MI_STAT,
1795                              MAC_MI_STAT_10MBPS_MODE |
1796                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1797                 else
1798                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1799         }
1800
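             /* Half-duplex gigabit needs the extended slot time
              * (0xff) for carrier extension; every other mode uses
              * the standard value of 32.
              */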
1801         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1802                 tw32(MAC_TX_LENGTHS,
1803                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1804                       (6 << TX_LENGTHS_IPG_SHIFT) |
1805                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1806         else
1807                 tw32(MAC_TX_LENGTHS,
1808                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1809                       (6 << TX_LENGTHS_IPG_SHIFT) |
1810                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1811
1812         if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1813             (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1814             phydev->speed != tp->link_config.active_speed ||
1815             phydev->duplex != tp->link_config.active_duplex ||
1816             oldflowctrl != tp->link_config.active_flowctrl)
1817                 linkmesg = 1;
1818
1819         tp->link_config.active_speed = phydev->speed;
1820         tp->link_config.active_duplex = phydev->duplex;
1821
1822         spin_unlock_bh(&tp->lock);
1823
1824         if (linkmesg)
1825                 tg3_link_report(tp);
1826 }
1827
1828 static int tg3_phy_init(struct tg3 *tp)
1829 {
1830         struct phy_device *phydev;
1831
1832         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1833                 return 0;
1834
1835         /* Bring the PHY back to a known state. */
1836         tg3_bmcr_reset(tp);
1837
1838         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1839
1840         /* Attach the MAC to the PHY. */
1841         phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1842                              phydev->dev_flags, phydev->interface);
1843         if (IS_ERR(phydev)) {
1844                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1845                 return PTR_ERR(phydev);
1846         }
1847
1848         /* Mask with MAC supported features. */
1849         switch (phydev->interface) {
1850         case PHY_INTERFACE_MODE_GMII:
1851         case PHY_INTERFACE_MODE_RGMII:
1852                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1853                         phydev->supported &= (PHY_GBIT_FEATURES |
1854                                               SUPPORTED_Pause |
1855                                               SUPPORTED_Asym_Pause);
1856                         break;
1857                 }
1858                 /* fallthru */
1859         case PHY_INTERFACE_MODE_MII:
1860                 phydev->supported &= (PHY_BASIC_FEATURES |
1861                                       SUPPORTED_Pause |
1862                                       SUPPORTED_Asym_Pause);
1863                 break;
1864         default:
1865                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1866                 return -EINVAL;
1867         }
1868
1869         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1870
1871         phydev->advertising = phydev->supported;
1872
1873         return 0;
1874 }
1875
1876 static void tg3_phy_start(struct tg3 *tp)
1877 {
1878         struct phy_device *phydev;
1879
1880         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1881                 return;
1882
1883         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1884
1885         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1886                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1887                 phydev->speed = tp->link_config.orig_speed;
1888                 phydev->duplex = tp->link_config.orig_duplex;
1889                 phydev->autoneg = tp->link_config.orig_autoneg;
1890                 phydev->advertising = tp->link_config.orig_advertising;
1891         }
1892
1893         phy_start(phydev);
1894
1895         phy_start_aneg(phydev);
1896 }
1897
1898 static void tg3_phy_stop(struct tg3 *tp)
1899 {
1900         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1901                 return;
1902
1903         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1904 }
1905
1906 static void tg3_phy_fini(struct tg3 *tp)
1907 {
1908         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1909                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1910                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1911         }
1912 }
1913
1914 static int tg3_phy_set_extloopbk(struct tg3 *tp)
1915 {
1916         int err;
1917         u32 val;
1918
1919         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
1920                 return 0;
1921
1922         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
1923                 /* Cannot do read-modify-write on 5401 */
1924                 err = tg3_phy_auxctl_write(tp,
1925                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1926                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
1927                                            0x4c20);
1928                 goto done;
1929         }
1930
1931         err = tg3_phy_auxctl_read(tp,
1932                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1933         if (err)
1934                 return err;
1935
1936         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
1937         err = tg3_phy_auxctl_write(tp,
1938                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
1939
1940 done:
1941         return err;
1942 }
1943
1944 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1945 {
1946         u32 phytest;
1947
1948         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1949                 u32 phy;
1950
1951                 tg3_writephy(tp, MII_TG3_FET_TEST,
1952                              phytest | MII_TG3_FET_SHADOW_EN);
1953                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1954                         if (enable)
1955                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1956                         else
1957                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1958                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1959                 }
1960                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1961         }
1962 }
1963
1964 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1965 {
1966         u32 reg;
1967
1968         if (!tg3_flag(tp, 5705_PLUS) ||
1969             (tg3_flag(tp, 5717_PLUS) &&
1970              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1971                 return;
1972
1973         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1974                 tg3_phy_fet_toggle_apd(tp, enable);
1975                 return;
1976         }
1977
1978         reg = MII_TG3_MISC_SHDW_WREN |
1979               MII_TG3_MISC_SHDW_SCR5_SEL |
1980               MII_TG3_MISC_SHDW_SCR5_LPED |
1981               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1982               MII_TG3_MISC_SHDW_SCR5_SDTL |
1983               MII_TG3_MISC_SHDW_SCR5_C125OE;
1984         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1985                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1986
1987         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1988
1989
1990         reg = MII_TG3_MISC_SHDW_WREN |
1991               MII_TG3_MISC_SHDW_APD_SEL |
1992               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
1993         if (enable)
1994                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
1995
1996         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1997 }
1998
1999 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2000 {
2001         u32 phy;
2002
2003         if (!tg3_flag(tp, 5705_PLUS) ||
2004             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2005                 return;
2006
2007         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2008                 u32 ephy;
2009
2010                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2011                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2012
2013                         tg3_writephy(tp, MII_TG3_FET_TEST,
2014                                      ephy | MII_TG3_FET_SHADOW_EN);
2015                         if (!tg3_readphy(tp, reg, &phy)) {
2016                                 if (enable)
2017                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2018                                 else
2019                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2020                                 tg3_writephy(tp, reg, phy);
2021                         }
2022                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2023                 }
2024         } else {
2025                 int ret;
2026
2027                 ret = tg3_phy_auxctl_read(tp,
2028                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2029                 if (!ret) {
2030                         if (enable)
2031                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2032                         else
2033                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2034                         tg3_phy_auxctl_write(tp,
2035                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2036                 }
2037         }
2038 }
2039
2040 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2041 {
2042         int ret;
2043         u32 val;
2044
2045         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2046                 return;
2047
2048         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2049         if (!ret)
2050                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2051                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2052 }
2053
2054 static void tg3_phy_apply_otp(struct tg3 *tp)
2055 {
2056         u32 otp, phy;
2057
2058         if (!tp->phy_otp)
2059                 return;
2060
2061         otp = tp->phy_otp;
2062
2063         if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
2064                 return;
2065
2066         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2067         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2068         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2069
2070         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2071               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2072         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2073
2074         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2075         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2076         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2077
2078         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2079         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2080
2081         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2082         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2083
2084         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2085               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2086         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2087
2088         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2089 }
2090
2091 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2092 {
2093         u32 val;
2094
2095         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2096                 return;
2097
2098         tp->setlpicnt = 0;
2099
2100         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2101             current_link_up == 1 &&
2102             tp->link_config.active_duplex == DUPLEX_FULL &&
2103             (tp->link_config.active_speed == SPEED_100 ||
2104              tp->link_config.active_speed == SPEED_1000)) {
2105                 u32 eeectl;
2106
2107                 if (tp->link_config.active_speed == SPEED_1000)
2108                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2109                 else
2110                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2111
2112                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2113
2114                 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2115                                   TG3_CL45_D7_EEERES_STAT, &val);
2116
2117                 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2118                     val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2119                         tp->setlpicnt = 2;
2120         }
2121
2122         if (!tp->setlpicnt) {
2123                 if (current_link_up == 1 &&
2124                    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2125                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2126                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2127                 }
2128
2129                 val = tr32(TG3_CPMU_EEE_MODE);
2130                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2131         }
2132 }
2133
2134 static void tg3_phy_eee_enable(struct tg3 *tp)
2135 {
2136         u32 val;
2137
2138         if (tp->link_config.active_speed == SPEED_1000 &&
2139             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2140              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2141              tg3_flag(tp, 57765_CLASS)) &&
2142             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2143                 val = MII_TG3_DSP_TAP26_ALNOKO |
2144                       MII_TG3_DSP_TAP26_RMRXSTO;
2145                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2146                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2147         }
2148
2149         val = tr32(TG3_CPMU_EEE_MODE);
2150         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2151 }
2152
2153 static int tg3_wait_macro_done(struct tg3 *tp)
2154 {
2155         int limit = 100;
2156
2157         while (limit--) {
2158                 u32 tmp32;
2159
2160                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
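                     /* Bit 0x1000 of MII_TG3_DSP_CONTROL reads as
                      * set while the DSP macro operation is still
                      * in progress.
                      */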
2161                         if ((tmp32 & 0x1000) == 0)
2162                                 break;
2163                 }
2164         }
2165         if (limit < 0)
2166                 return -EBUSY;
2167
2168         return 0;
2169 }
2170
2171 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2172 {
2173         static const u32 test_pat[4][6] = {
2174         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2175         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2176         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2177         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2178         };
2179         int chan;
2180
2181         for (chan = 0; chan < 4; chan++) {
2182                 int i;
2183
2184                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2185                              (chan * 0x2000) | 0x0200);
2186                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2187
2188                 for (i = 0; i < 6; i++)
2189                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2190                                      test_pat[chan][i]);
2191
2192                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2193                 if (tg3_wait_macro_done(tp)) {
2194                         *resetp = 1;
2195                         return -EBUSY;
2196                 }
2197
2198                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2199                              (chan * 0x2000) | 0x0200);
2200                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2201                 if (tg3_wait_macro_done(tp)) {
2202                         *resetp = 1;
2203                         return -EBUSY;
2204                 }
2205
2206                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2207                 if (tg3_wait_macro_done(tp)) {
2208                         *resetp = 1;
2209                         return -EBUSY;
2210                 }
2211
2212                 for (i = 0; i < 6; i += 2) {
2213                         u32 low, high;
2214
2215                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2216                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2217                             tg3_wait_macro_done(tp)) {
2218                                 *resetp = 1;
2219                                 return -EBUSY;
2220                         }
2221                         low &= 0x7fff;
2222                         high &= 0x000f;
2223                         if (low != test_pat[chan][i] ||
2224                             high != test_pat[chan][i+1]) {
2225                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2226                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2227                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2228
2229                                 return -EBUSY;
2230                         }
2231                 }
2232         }
2233
2234         return 0;
2235 }
2236
2237 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2238 {
2239         int chan;
2240
2241         for (chan = 0; chan < 4; chan++) {
2242                 int i;
2243
2244                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2245                              (chan * 0x2000) | 0x0200);
2246                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2247                 for (i = 0; i < 6; i++)
2248                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
2249                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2250                 if (tg3_wait_macro_done(tp))
2251                         return -EBUSY;
2252         }
2253
2254         return 0;
2255 }
2256
2257 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2258 {
2259         u32 reg32, phy9_orig;
2260         int retries, do_phy_reset, err;
2261
2262         retries = 10;
2263         do_phy_reset = 1;
2264         do {
2265                 if (do_phy_reset) {
2266                         err = tg3_bmcr_reset(tp);
2267                         if (err)
2268                                 return err;
2269                         do_phy_reset = 0;
2270                 }
2271
2272                 /* Disable transmitter and interrupt.  */
2273                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2274                         continue;
2275
2276                 reg32 |= 0x3000;
2277                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2278
2279                 /* Set full-duplex, 1000 mbps.  */
2280                 tg3_writephy(tp, MII_BMCR,
2281                              BMCR_FULLDPLX | BMCR_SPEED1000);
2282
2283                 /* Set to master mode.  */
2284                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2285                         continue;
2286
2287                 tg3_writephy(tp, MII_CTRL1000,
2288                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2289
2290                 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2291                 if (err)
2292                         return err;
2293
2294                 /* Block the PHY control access.  */
2295                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2296
2297                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2298                 if (!err)
2299                         break;
2300         } while (--retries);
2301
2302         err = tg3_phy_reset_chanpat(tp);
2303         if (err)
2304                 return err;
2305
2306         tg3_phydsp_write(tp, 0x8005, 0x0000);
2307
2308         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2309         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2310
2311         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2312
2313         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2314
2315         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2316                 reg32 &= ~0x3000;
2317                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2318         } else if (!err)
2319                 err = -EBUSY;
2320
2321         return err;
2322 }
2323
2324 /* This resets the tigon3 PHY and reapplies the chip-specific
2325  * workarounds (DSP fixups, APD, jumbo settings) that a reset clears.
2326  */
2327 static int tg3_phy_reset(struct tg3 *tp)
2328 {
2329         u32 val, cpmuctrl;
2330         int err;
2331
2332         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2333                 val = tr32(GRC_MISC_CFG);
2334                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2335                 udelay(40);
2336         }
2337         err  = tg3_readphy(tp, MII_BMSR, &val);
2338         err |= tg3_readphy(tp, MII_BMSR, &val);
2339         if (err != 0)
2340                 return -EBUSY;
2341
2342         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2343                 netif_carrier_off(tp->dev);
2344                 tg3_link_report(tp);
2345         }
2346
2347         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2348             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2349             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2350                 err = tg3_phy_reset_5703_4_5(tp);
2351                 if (err)
2352                         return err;
2353                 goto out;
2354         }
2355
2356         cpmuctrl = 0;
2357         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2358             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2359                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2360                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2361                         tw32(TG3_CPMU_CTRL,
2362                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2363         }
2364
2365         err = tg3_bmcr_reset(tp);
2366         if (err)
2367                 return err;
2368
2369         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2370                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2371                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2372
2373                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2374         }
2375
2376         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2377             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2378                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2379                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2380                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2381                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2382                         udelay(40);
2383                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2384                 }
2385         }
2386
2387         if (tg3_flag(tp, 5717_PLUS) &&
2388             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2389                 return 0;
2390
2391         tg3_phy_apply_otp(tp);
2392
2393         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2394                 tg3_phy_toggle_apd(tp, true);
2395         else
2396                 tg3_phy_toggle_apd(tp, false);
2397
2398 out:
2399         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2400             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2401                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2402                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2403                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2404         }
2405
2406         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2407                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2408                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2409         }
2410
2411         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2412                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2413                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2414                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2415                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2416                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2417                 }
2418         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2419                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2420                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2421                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2422                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2423                                 tg3_writephy(tp, MII_TG3_TEST1,
2424                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2425                         } else
2426                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2427
2428                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2429                 }
2430         }
2431
2432         /* Set the extended packet length bit (bit 14) on all
2433          * chips that support jumbo frames. */
2434         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2435                 /* Cannot do read-modify-write on 5401 */
2436                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2437         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2438                 /* Set bit 14 with read-modify-write to preserve other bits */
2439                 err = tg3_phy_auxctl_read(tp,
2440                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2441                 if (!err)
2442                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2443                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2444         }
2445
2446         /* Set PHY register 0x10 bit 0 (high FIFO elasticity) to
2447          * support jumbo frame transmission.
2448          */
2449         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2450                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2451                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2452                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2453         }
2454
2455         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2456                 /* adjust output voltage */
2457                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2458         }
2459
2460         tg3_phy_toggle_automdix(tp, 1);
2461         tg3_phy_set_wirespeed(tp);
2462         return 0;
2463 }
2464
2465 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2466 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2467 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2468                                           TG3_GPIO_MSG_NEED_VAUX)
2469 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2470         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2471          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2472          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2473          (TG3_GPIO_MSG_DRVR_PRES << 12))
2474
2475 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2476         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2477          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2478          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2479          (TG3_GPIO_MSG_NEED_VAUX << 12))
2480
2481 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2482 {
2483         u32 status, shift;
2484
2485         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2486             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2487                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2488         else
2489                 status = tr32(TG3_CPMU_DRV_STATUS);
2490
2491         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2492         status &= ~(TG3_GPIO_MSG_MASK << shift);
2493         status |= (newstat << shift);
2494
2495         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2496             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2497                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2498         else
2499                 tw32(TG3_CPMU_DRV_STATUS, status);
2500
2501         return status >> TG3_APE_GPIO_MSG_SHIFT;
2502 }
2503
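/* Each PCI function owns one 4-bit nibble of the status word, which
 * is why the TG3_GPIO_MSG_ALL_*_MASK macros above repeat the flags
 * at bit offsets 0, 4, 8 and 12.  E.g. function 2 posting
 * TG3_GPIO_MSG_NEED_VAUX lands in the nibble 8 bits above
 * TG3_APE_GPIO_MSG_SHIFT.
 */
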
2504 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2505 {
2506         if (!tg3_flag(tp, IS_NIC))
2507                 return 0;
2508
2509         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2510             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2511             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2512                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2513                         return -EIO;
2514
2515                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2516
2517                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2518                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2519
2520                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2521         } else {
2522                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2523                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2524         }
2525
2526         return 0;
2527 }
2528
2529 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2530 {
2531         u32 grc_local_ctrl;
2532
2533         if (!tg3_flag(tp, IS_NIC) ||
2534             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2535             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2536                 return;
2537
2538         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2539
2540         tw32_wait_f(GRC_LOCAL_CTRL,
2541                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2542                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2543
2544         tw32_wait_f(GRC_LOCAL_CTRL,
2545                     grc_local_ctrl,
2546                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2547
2548         tw32_wait_f(GRC_LOCAL_CTRL,
2549                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2550                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2551 }
2552
2553 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2554 {
2555         if (!tg3_flag(tp, IS_NIC))
2556                 return;
2557
2558         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2559             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2560                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2561                             (GRC_LCLCTRL_GPIO_OE0 |
2562                              GRC_LCLCTRL_GPIO_OE1 |
2563                              GRC_LCLCTRL_GPIO_OE2 |
2564                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2565                              GRC_LCLCTRL_GPIO_OUTPUT1),
2566                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2567         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2568                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2569                 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2570                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2571                                      GRC_LCLCTRL_GPIO_OE1 |
2572                                      GRC_LCLCTRL_GPIO_OE2 |
2573                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2574                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2575                                      tp->grc_local_ctrl;
2576                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2577                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2578
2579                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2580                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2581                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2582
2583                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2584                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2585                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2586         } else {
2587                 u32 no_gpio2;
2588                 u32 grc_local_ctrl = 0;
2589
2590                 /* Workaround to prevent overdrawing current. */
2591                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2592                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2593                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2594                                     grc_local_ctrl,
2595                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2596                 }
2597
2598                 /* On 5753 and variants, GPIO2 cannot be used. */
2599                 no_gpio2 = tp->nic_sram_data_cfg &
2600                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2601
2602                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2603                                   GRC_LCLCTRL_GPIO_OE1 |
2604                                   GRC_LCLCTRL_GPIO_OE2 |
2605                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2606                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2607                 if (no_gpio2) {
2608                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2609                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2610                 }
2611                 tw32_wait_f(GRC_LOCAL_CTRL,
2612                             tp->grc_local_ctrl | grc_local_ctrl,
2613                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2614
2615                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2616
2617                 tw32_wait_f(GRC_LOCAL_CTRL,
2618                             tp->grc_local_ctrl | grc_local_ctrl,
2619                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2620
2621                 if (!no_gpio2) {
2622                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2623                         tw32_wait_f(GRC_LOCAL_CTRL,
2624                                     tp->grc_local_ctrl | grc_local_ctrl,
2625                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2626                 }
2627         }
2628 }
2629
2630 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2631 {
2632         u32 msg = 0;
2633
2634         /* Serialize power state transitions */
2635         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2636                 return;
2637
2638         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2639                 msg = TG3_GPIO_MSG_NEED_VAUX;
2640
2641         msg = tg3_set_function_status(tp, msg);
2642
2643         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2644                 goto done;
2645
2646         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2647                 tg3_pwrsrc_switch_to_vaux(tp);
2648         else
2649                 tg3_pwrsrc_die_with_vmain(tp);
2650
2651 done:
2652         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2653 }
2654
2655 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2656 {
2657         bool need_vaux = false;
2658
2659         /* The GPIOs do something completely different on 57765. */
2660         if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2661                 return;
2662
2663         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2664             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2665             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2666                 tg3_frob_aux_power_5717(tp, include_wol ?
2667                                         tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2668                 return;
2669         }
2670
2671         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2672                 struct net_device *dev_peer;
2673
2674                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2675
2676                 /* remove_one() may have been run on the peer. */
2677                 if (dev_peer) {
2678                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2679
2680                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2681                                 return;
2682
2683                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2684                             tg3_flag(tp_peer, ENABLE_ASF))
2685                                 need_vaux = true;
2686                 }
2687         }
2688
2689         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2690             tg3_flag(tp, ENABLE_ASF))
2691                 need_vaux = true;
2692
2693         if (need_vaux)
2694                 tg3_pwrsrc_switch_to_vaux(tp);
2695         else
2696                 tg3_pwrsrc_die_with_vmain(tp);
2697 }
2698
2699 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2700 {
2701         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2702                 return 1;
2703         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2704                 if (speed != SPEED_10)
2705                         return 1;
2706         } else if (speed == SPEED_10)
2707                 return 1;
2708
2709         return 0;
2710 }
2711
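/* In short, tg3_5700_link_polarity() returns 1 when the LED mode is
 * PHY_2, when a BCM5411 PHY links above 10 Mbps, or when any other
 * PHY links at exactly 10 Mbps; callers use this to decide whether
 * to set the link-polarity bit in MAC_MODE.
 */
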
2712 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2713 {
2714         u32 val;
2715
2716         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2717                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2718                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2719                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2720
2721                         sg_dig_ctrl |=
2722                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2723                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
2724                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2725                 }
2726                 return;
2727         }
2728
2729         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2730                 tg3_bmcr_reset(tp);
2731                 val = tr32(GRC_MISC_CFG);
2732                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2733                 udelay(40);
2734                 return;
2735         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2736                 u32 phytest;
2737                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2738                         u32 phy;
2739
2740                         tg3_writephy(tp, MII_ADVERTISE, 0);
2741                         tg3_writephy(tp, MII_BMCR,
2742                                      BMCR_ANENABLE | BMCR_ANRESTART);
2743
2744                         tg3_writephy(tp, MII_TG3_FET_TEST,
2745                                      phytest | MII_TG3_FET_SHADOW_EN);
2746                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2747                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2748                                 tg3_writephy(tp,
2749                                              MII_TG3_FET_SHDW_AUXMODE4,
2750                                              phy);
2751                         }
2752                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2753                 }
2754                 return;
2755         } else if (do_low_power) {
2756                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2757                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2758
2759                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2760                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2761                       MII_TG3_AUXCTL_PCTL_VREG_11V;
2762                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2763         }
2764
2765         /* Due to hardware errata, the PHY must not be powered
2766          * down on some chips.
2767          */
2768         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2769             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2770             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2771              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2772                 return;
2773
2774         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2775             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2776                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2777                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2778                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2779                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2780         }
2781
2782         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2783 }
2784
2785 /* tp->lock is held. */
2786 static int tg3_nvram_lock(struct tg3 *tp)
2787 {
2788         if (tg3_flag(tp, NVRAM)) {
2789                 int i;
2790
2791                 if (tp->nvram_lock_cnt == 0) {
2792                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2793                         for (i = 0; i < 8000; i++) {
2794                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2795                                         break;
2796                                 udelay(20);
2797                         }
2798                         if (i == 8000) {
2799                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2800                                 return -ENODEV;
2801                         }
2802                 }
2803                 tp->nvram_lock_cnt++;
2804         }
2805         return 0;
2806 }
2807
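/* tg3_nvram_lock()/tg3_nvram_unlock() nest via nvram_lock_cnt: only
 * the outermost lock performs SWARB arbitration (busy-waiting at
 * most about 8000 * 20 usec = 160 ms) and only the matching
 * outermost unlock releases the grant.
 */
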
2808 /* tp->lock is held. */
2809 static void tg3_nvram_unlock(struct tg3 *tp)
2810 {
2811         if (tg3_flag(tp, NVRAM)) {
2812                 if (tp->nvram_lock_cnt > 0)
2813                         tp->nvram_lock_cnt--;
2814                 if (tp->nvram_lock_cnt == 0)
2815                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2816         }
2817 }
2818
2819 /* tp->lock is held. */
2820 static void tg3_enable_nvram_access(struct tg3 *tp)
2821 {
2822         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2823                 u32 nvaccess = tr32(NVRAM_ACCESS);
2824
2825                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2826         }
2827 }
2828
2829 /* tp->lock is held. */
2830 static void tg3_disable_nvram_access(struct tg3 *tp)
2831 {
2832         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2833                 u32 nvaccess = tr32(NVRAM_ACCESS);
2834
2835                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2836         }
2837 }
2838
2839 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2840                                         u32 offset, u32 *val)
2841 {
2842         u32 tmp;
2843         int i;
2844
2845         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2846                 return -EINVAL;
2847
2848         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2849                                         EEPROM_ADDR_DEVID_MASK |
2850                                         EEPROM_ADDR_READ);
2851         tw32(GRC_EEPROM_ADDR,
2852              tmp |
2853              (0 << EEPROM_ADDR_DEVID_SHIFT) |
2854              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2855               EEPROM_ADDR_ADDR_MASK) |
2856              EEPROM_ADDR_READ | EEPROM_ADDR_START);
2857
2858         for (i = 0; i < 1000; i++) {
2859                 tmp = tr32(GRC_EEPROM_ADDR);
2860
2861                 if (tmp & EEPROM_ADDR_COMPLETE)
2862                         break;
2863                 msleep(1);
2864         }
2865         if (!(tmp & EEPROM_ADDR_COMPLETE))
2866                 return -EBUSY;
2867
2868         tmp = tr32(GRC_EEPROM_DATA);
2869
2870         /*
2871          * The data will always be opposite the native endian
2872          * format.  Perform a blind byteswap to compensate.
2873          */
2874         *val = swab32(tmp);
2875
2876         return 0;
2877 }
2878
2879 #define NVRAM_CMD_TIMEOUT 10000
2880
2881 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2882 {
2883         int i;
2884
2885         tw32(NVRAM_CMD, nvram_cmd);
2886         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2887                 udelay(10);
2888                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2889                         udelay(10);
2890                         break;
2891                 }
2892         }
2893
2894         if (i == NVRAM_CMD_TIMEOUT)
2895                 return -EBUSY;
2896
2897         return 0;
2898 }
2899
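/* With NVRAM_CMD_TIMEOUT polls of 10 usec each, tg3_nvram_exec_cmd()
 * above busy-waits at most roughly 100 ms before giving up with
 * -EBUSY.
 */
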
2900 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2901 {
2902         if (tg3_flag(tp, NVRAM) &&
2903             tg3_flag(tp, NVRAM_BUFFERED) &&
2904             tg3_flag(tp, FLASH) &&
2905             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2906             (tp->nvram_jedecnum == JEDEC_ATMEL))
2907
2908                 addr = ((addr / tp->nvram_pagesize) <<
2909                         ATMEL_AT45DB0X1B_PAGE_POS) +
2910                        (addr % tp->nvram_pagesize);
2911
2912         return addr;
2913 }
2914
2915 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2916 {
2917         if (tg3_flag(tp, NVRAM) &&
2918             tg3_flag(tp, NVRAM_BUFFERED) &&
2919             tg3_flag(tp, FLASH) &&
2920             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2921             (tp->nvram_jedecnum == JEDEC_ATMEL))
2922
2923                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2924                         tp->nvram_pagesize) +
2925                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2926
2927         return addr;
2928 }
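
/* Worked example for the two Atmel translations above, assuming the
 * usual 264-byte AT45DB0X1B page (tp->nvram_pagesize == 264,
 * ATMEL_AT45DB0X1B_PAGE_POS == 9): logical address 1000 is page
 * 1000 / 264 = 3, byte 1000 % 264 = 208, so the physical address is
 * (3 << 9) + 208 = 0x6d0.  tg3_nvram_logical_addr() is the inverse.
 */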
2929
2930 /* NOTE: Data read in from NVRAM is byteswapped according to
2931  * the byteswapping settings for all other register accesses.
2932  * tg3 devices are BE devices, so on a BE machine, the data
2933  * returned will be exactly as it is seen in NVRAM.  On a LE
2934  * machine, the 32-bit value will be byteswapped.
2935  */
2936 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2937 {
2938         int ret;
2939
2940         if (!tg3_flag(tp, NVRAM))
2941                 return tg3_nvram_read_using_eeprom(tp, offset, val);
2942
2943         offset = tg3_nvram_phys_addr(tp, offset);
2944
2945         if (offset > NVRAM_ADDR_MSK)
2946                 return -EINVAL;
2947
2948         ret = tg3_nvram_lock(tp);
2949         if (ret)
2950                 return ret;
2951
2952         tg3_enable_nvram_access(tp);
2953
2954         tw32(NVRAM_ADDR, offset);
2955         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2956                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2957
2958         if (ret == 0)
2959                 *val = tr32(NVRAM_RDDATA);
2960
2961         tg3_disable_nvram_access(tp);
2962
2963         tg3_nvram_unlock(tp);
2964
2965         return ret;
2966 }
2967
2968 /* Ensures NVRAM data is in bytestream format. */
2969 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2970 {
2971         u32 v;
2972         int res = tg3_nvram_read(tp, offset, &v);
2973         if (!res)
2974                 *val = cpu_to_be32(v);
2975         return res;
2976 }
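
/* A minimal caller sketch (hypothetical): filling a buffer with NVRAM
 * contents in bytestream order, one dword at a time:
 *
 *	__be32 buf[4];
 *	int i, err;
 *
 *	for (i = 0; i < 4; i++) {
 *		err = tg3_nvram_read_be32(tp, offset + i * 4, &buf[i]);
 *		if (err)
 *			return err;
 *	}
 */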
2977
2978 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
2979                                     u32 offset, u32 len, u8 *buf)
2980 {
2981         int i, j, rc = 0;
2982         u32 val;
2983
2984         for (i = 0; i < len; i += 4) {
2985                 u32 addr;
2986                 __be32 data;
2987
2988                 addr = offset + i;
2989
2990                 memcpy(&data, buf + i, 4);
2991
2992                 /*
2993                  * The SEEPROM interface expects the data in the byte
2994                  * order opposite the host's native endianness.  We do
2995                  * this by reversing all the operations that a call to
2996                  * tg3_nvram_read_be32() would have performed on the data.
2997                  */
2998                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
2999
3000                 val = tr32(GRC_EEPROM_ADDR);
3001                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3002
3003                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3004                         EEPROM_ADDR_READ);
3005                 tw32(GRC_EEPROM_ADDR, val |
3006                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
3007                         (addr & EEPROM_ADDR_ADDR_MASK) |
3008                         EEPROM_ADDR_START |
3009                         EEPROM_ADDR_WRITE);
3010
3011                 for (j = 0; j < 1000; j++) {
3012                         val = tr32(GRC_EEPROM_ADDR);
3013
3014                         if (val & EEPROM_ADDR_COMPLETE)
3015                                 break;
3016                         msleep(1);
3017                 }
3018                 if (!(val & EEPROM_ADDR_COMPLETE)) {
3019                         rc = -EBUSY;
3020                         break;
3021                 }
3022         }
3023
3024         return rc;
3025 }
3026
3027 /* offset and length are dword aligned */
3028 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3029                 u8 *buf)
3030 {
3031         int ret = 0;
3032         u32 pagesize = tp->nvram_pagesize;
3033         u32 pagemask = pagesize - 1;
3034         u32 nvram_cmd;
3035         u8 *tmp;
3036
3037         tmp = kmalloc(pagesize, GFP_KERNEL);
3038         if (tmp == NULL)
3039                 return -ENOMEM;
3040
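        /* Unbuffered flash is programmed a full page at a time, so each
         * pass below is a read-modify-write: read the whole page into
         * tmp, merge the caller's bytes at page_off, then write-enable,
         * erase, write-enable again and program the page dword by dword
         * with FIRST/LAST framing.
         */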
3041         while (len) {
3042                 int j;
3043                 u32 phy_addr, page_off, size;
3044
3045                 phy_addr = offset & ~pagemask;
3046
3047                 for (j = 0; j < pagesize; j += 4) {
3048                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
3049                                                   (__be32 *) (tmp + j));
3050                         if (ret)
3051                                 break;
3052                 }
3053                 if (ret)
3054                         break;
3055
3056                 page_off = offset & pagemask;
3057                 size = pagesize - page_off;
3058                 if (len < size)
3059                         size = len;
3060
3061                 len -= size;
3062
3063                 memcpy(tmp + page_off, buf, size);
3064                 buf += size;
3065                 offset = offset + (pagesize - page_off);
3066
3067                 tg3_enable_nvram_access(tp);
3068
3069                 /*
3070                  * Before we can erase the flash page, we need
3071                  * to issue a special "write enable" command.
3072                  */
3073                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3074
3075                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3076                         break;
3077
3078                 /* Erase the target page */
3079                 tw32(NVRAM_ADDR, phy_addr);
3080
3081                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3082                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3083
3084                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3085                         break;
3086
3087                 /* Issue another write enable to start the write. */
3088                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3089
3090                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3091                         break;
3092
3093                 for (j = 0; j < pagesize; j += 4) {
3094                         __be32 data;
3095
3096                         data = *((__be32 *) (tmp + j));
3097
3098                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
3099
3100                         tw32(NVRAM_ADDR, phy_addr + j);
3101
3102                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3103                                 NVRAM_CMD_WR;
3104
3105                         if (j == 0)
3106                                 nvram_cmd |= NVRAM_CMD_FIRST;
3107                         else if (j == (pagesize - 4))
3108                                 nvram_cmd |= NVRAM_CMD_LAST;
3109
3110                         ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3111                         if (ret)
3112                                 break;
3113                 }
3114                 if (ret)
3115                         break;
3116         }
3117
3118         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3119         tg3_nvram_exec_cmd(tp, nvram_cmd);
3120
3121         kfree(tmp);
3122
3123         return ret;
3124 }
3125
3126 /* offset and length are dword aligned */
3127 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3128                 u8 *buf)
3129 {
3130         int i, ret = 0;
3131
3132         for (i = 0; i < len; i += 4, offset += 4) {
3133                 u32 page_off, phy_addr, nvram_cmd;
3134                 __be32 data;
3135
3136                 memcpy(&data, buf + i, 4);
3137                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3138
3139                 page_off = offset % tp->nvram_pagesize;
3140
3141                 phy_addr = tg3_nvram_phys_addr(tp, offset);
3142
3143                 tw32(NVRAM_ADDR, phy_addr);
3144
3145                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3146
3147                 if (page_off == 0 || i == 0)
3148                         nvram_cmd |= NVRAM_CMD_FIRST;
3149                 if (page_off == (tp->nvram_pagesize - 4))
3150                         nvram_cmd |= NVRAM_CMD_LAST;
3151
3152                 if (i == (len - 4))
3153                         nvram_cmd |= NVRAM_CMD_LAST;
3154
3155                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
3156                     !tg3_flag(tp, 5755_PLUS) &&
3157                     (tp->nvram_jedecnum == JEDEC_ST) &&
3158                     (nvram_cmd & NVRAM_CMD_FIRST)) {
3159                         u32 cmd;
3160
3161                         cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3162                         ret = tg3_nvram_exec_cmd(tp, cmd);
3163                         if (ret)
3164                                 break;
3165                 }
3166                 if (!tg3_flag(tp, FLASH)) {
3167                         /* We always do complete word writes to eeprom. */
3168                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3169                 }
3170
3171                 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3172                 if (ret)
3173                         break;
3174         }
3175         return ret;
3176 }
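
/* In both write paths, NVRAM_CMD_FIRST and NVRAM_CMD_LAST bracket a
 * programming burst: FIRST on the first dword of a page (or of the
 * transfer), LAST on a page's final dword or the transfer's final
 * dword.  Non-flash EEPROM parts get both flags on every dword, i.e.
 * each dword is a self-contained transaction.
 */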
3177
3178 /* offset and length are dword aligned */
3179 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3180 {
3181         int ret;
3182
3183         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3184                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3185                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3186                 udelay(40);
3187         }
3188
3189         if (!tg3_flag(tp, NVRAM)) {
3190                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3191         } else {
3192                 u32 grc_mode;
3193
3194                 ret = tg3_nvram_lock(tp);
3195                 if (ret)
3196                         return ret;
3197
3198                 tg3_enable_nvram_access(tp);
3199                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3200                         tw32(NVRAM_WRITE1, 0x406);
3201
3202                 grc_mode = tr32(GRC_MODE);
3203                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3204
3205                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3206                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3207                                 buf);
3208                 } else {
3209                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3210                                 buf);
3211                 }
3212
3213                 grc_mode = tr32(GRC_MODE);
3214                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3215
3216                 tg3_disable_nvram_access(tp);
3217                 tg3_nvram_unlock(tp);
3218         }
3219
3220         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3221                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3222                 udelay(40);
3223         }
3224
3225         return ret;
3226 }
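
/* Hypothetical caller sketch -- rewriting a single dword (offset and
 * len must be dword aligned, per the comment above the function; the
 * data is expected in the same bytestream order tg3_nvram_read_be32()
 * returns):
 *
 *	__be32 val = cpu_to_be32(0xdeadbeef);
 *	int err = tg3_nvram_write_block(tp, offset, 4, (u8 *)&val);
 */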
3227
3228 #define RX_CPU_SCRATCH_BASE     0x30000
3229 #define RX_CPU_SCRATCH_SIZE     0x04000
3230 #define TX_CPU_SCRATCH_BASE     0x34000
3231 #define TX_CPU_SCRATCH_SIZE     0x04000
3232
3233 /* tp->lock is held. */
3234 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3235 {
3236         int i;
3237
3238         BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3239
3240         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3241                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3242
3243                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3244                 return 0;
3245         }
3246         if (offset == RX_CPU_BASE) {
3247                 for (i = 0; i < 10000; i++) {
3248                         tw32(offset + CPU_STATE, 0xffffffff);
3249                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3250                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3251                                 break;
3252                 }
3253
3254                 tw32(offset + CPU_STATE, 0xffffffff);
3255                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
3256                 udelay(10);
3257         } else {
3258                 for (i = 0; i < 10000; i++) {
3259                         tw32(offset + CPU_STATE, 0xffffffff);
3260                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3261                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3262                                 break;
3263                 }
3264         }
3265
3266         if (i >= 10000) {
3267                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3268                            __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3269                 return -ENODEV;
3270         }
3271
3272         /* Clear firmware's nvram arbitration. */
3273         if (tg3_flag(tp, NVRAM))
3274                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3275         return 0;
3276 }
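
/* The halt loop above retries up to 10000 times, and the RX CPU gets
 * one final forced CPU_MODE_HALT plus a 10us settle.  The 0xffffffff
 * written to CPU_STATE before each attempt is presumably a
 * write-one-to-clear of any pending state bits.
 */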
3277
3278 struct fw_info {
3279         unsigned int fw_base;
3280         unsigned int fw_len;
3281         const __be32 *fw_data;
3282 };
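
/* Layout of tp->fw->data as consumed below (big-endian dwords):
 *
 *	fw_data[0]	firmware version
 *	fw_data[1]	load address (fw_base)
 *	fw_data[2]	stated length (unused here; fw_len is instead
 *			derived as tp->fw->size - 12)
 *	fw_data[3]...	the blob itself, loaded contiguously at fw_base
 */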
3283
3284 /* tp->lock is held. */
3285 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3286                                  u32 cpu_scratch_base, int cpu_scratch_size,
3287                                  struct fw_info *info)
3288 {
3289         int err, lock_err, i;
3290         void (*write_op)(struct tg3 *, u32, u32);
3291
3292         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3293                 netdev_err(tp->dev,
3294                            "%s: Trying to load TX cpu firmware on a 5705-plus device\n",
3295                            __func__);
3296                 return -EINVAL;
3297         }
3298
3299         if (tg3_flag(tp, 5705_PLUS))
3300                 write_op = tg3_write_mem;
3301         else
3302                 write_op = tg3_write_indirect_reg32;
3303
3304         /* It is possible that bootcode is still loading at this point.
3305          * Grab the nvram lock before halting the cpu.
3306          */
3307         lock_err = tg3_nvram_lock(tp);
3308         err = tg3_halt_cpu(tp, cpu_base);
3309         if (!lock_err)
3310                 tg3_nvram_unlock(tp);
3311         if (err)
3312                 goto out;
3313
3314         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3315                 write_op(tp, cpu_scratch_base + i, 0);
3316         tw32(cpu_base + CPU_STATE, 0xffffffff);
3317         tw32(cpu_base + CPU_MODE, tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3318         for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3319                 write_op(tp, (cpu_scratch_base +
3320                               (info->fw_base & 0xffff) +
3321                               (i * sizeof(u32))),
3322                               be32_to_cpu(info->fw_data[i]));
3323
3324         err = 0;
3325
3326 out:
3327         return err;
3328 }
3329
3330 /* tp->lock is held. */
3331 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3332 {
3333         struct fw_info info;
3334         const __be32 *fw_data;
3335         int err, i;
3336
3337         fw_data = (void *)tp->fw->data;
3338
3339         /* Firmware blob starts with version numbers, followed by
3340          * start address and length.  We set the complete length:
3341          * length = end_address_of_bss - start_address_of_text.
3342          * The remainder is the blob, to be loaded contiguously
3343          * from the start address.
3344          */
3345         info.fw_base = be32_to_cpu(fw_data[1]);
3346         info.fw_len = tp->fw->size - 12;
3347         info.fw_data = &fw_data[3];
3348
3349         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3350                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3351                                     &info);
3352         if (err)
3353                 return err;
3354
3355         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3356                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3357                                     &info);
3358         if (err)
3359                 return err;
3360
3361         /* Now startup only the RX cpu. */
3362         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3363         tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3364
3365         for (i = 0; i < 5; i++) {
3366                 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3367                         break;
3368                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3369                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3370                 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3371                 udelay(1000);
3372         }
3373         if (i >= 5) {
3374                 netdev_err(tp->dev, "%s failed to set RX CPU PC, is %08x, "
3375                            "should be %08x\n", __func__,
3376                            tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3377                 return -ENODEV;
3378         }
3379         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3380         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
3381
3382         return 0;
3383 }
3384
3385 /* tp->lock is held. */
3386 static int tg3_load_tso_firmware(struct tg3 *tp)
3387 {
3388         struct fw_info info;
3389         const __be32 *fw_data;
3390         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3391         int err, i;
3392
3393         if (tg3_flag(tp, HW_TSO_1) ||
3394             tg3_flag(tp, HW_TSO_2) ||
3395             tg3_flag(tp, HW_TSO_3))
3396                 return 0;
3397
3398         fw_data = (void *)tp->fw->data;
3399
3400         /* Firmware blob starts with version numbers, followed by
3401          * start address and length.  We set the complete length:
3402          * length = end_address_of_bss - start_address_of_text.
3403          * The remainder is the blob, to be loaded contiguously
3404          * from the start address.
3405          */
3406         info.fw_base = be32_to_cpu(fw_data[1]);
3407         cpu_scratch_size = tp->fw_len;
3408         info.fw_len = tp->fw->size - 12;
3409         info.fw_data = &fw_data[3];
3410
3411         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3412                 cpu_base = RX_CPU_BASE;
3413                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3414         } else {
3415                 cpu_base = TX_CPU_BASE;
3416                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3417                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3418         }
3419
3420         err = tg3_load_firmware_cpu(tp, cpu_base,
3421                                     cpu_scratch_base, cpu_scratch_size,
3422                                     &info);
3423         if (err)
3424                 return err;
3425
3426         /* Now startup the cpu. */
3427         tw32(cpu_base + CPU_STATE, 0xffffffff);
3428         tw32_f(cpu_base + CPU_PC, info.fw_base);
3429
3430         for (i = 0; i < 5; i++) {
3431                 if (tr32(cpu_base + CPU_PC) == info.fw_base)
3432                         break;
3433                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3434                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3435                 tw32_f(cpu_base + CPU_PC, info.fw_base);
3436                 udelay(1000);
3437         }
3438         if (i >= 5) {
3439                 netdev_err(tp->dev,
3440                            "%s failed to set CPU PC, is %08x, should be %08x\n",
3441                            __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3442                 return -ENODEV;
3443         }
3444         tw32(cpu_base + CPU_STATE, 0xffffffff);
3445         tw32_f(cpu_base + CPU_MODE,  0x00000000);
3446         return 0;
3447 }
3448
3449
3450 /* tp->lock is held. */
3451 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3452 {
3453         u32 addr_high, addr_low;
3454         int i;
3455
3456         addr_high = ((tp->dev->dev_addr[0] << 8) |
3457                      tp->dev->dev_addr[1]);
3458         addr_low = ((tp->dev->dev_addr[2] << 24) |
3459                     (tp->dev->dev_addr[3] << 16) |
3460                     (tp->dev->dev_addr[4] <<  8) |
3461                     (tp->dev->dev_addr[5] <<  0));
3462         for (i = 0; i < 4; i++) {
3463                 if (i == 1 && skip_mac_1)
3464                         continue;
3465                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3466                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3467         }
3468
3469         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3470             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3471                 for (i = 0; i < 12; i++) {
3472                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3473                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3474                 }
3475         }
3476
3477         addr_high = (tp->dev->dev_addr[0] +
3478                      tp->dev->dev_addr[1] +
3479                      tp->dev->dev_addr[2] +
3480                      tp->dev->dev_addr[3] +
3481                      tp->dev->dev_addr[4] +
3482                      tp->dev->dev_addr[5]) &
3483                 TX_BACKOFF_SEED_MASK;
3484         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3485 }
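
/* Packing example for the registers above: for MAC address
 * 00:10:18:aa:bb:cc, addr_high = 0x00000010 (bytes 0-1) and
 * addr_low = 0x18aabbcc (bytes 2-5).  The same pair is written to all
 * four MAC_ADDR slots (and to the twelve extended slots on 5703/5704),
 * and the plain byte sum seeds the TX backoff generator.
 */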
3486
3487 static void tg3_enable_register_access(struct tg3 *tp)
3488 {
3489         /*
3490          * Make sure register accesses (indirect or otherwise) will function
3491          * correctly.
3492          */
3493         pci_write_config_dword(tp->pdev,
3494                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3495 }
3496
3497 static int tg3_power_up(struct tg3 *tp)
3498 {
3499         int err;
3500
3501         tg3_enable_register_access(tp);
3502
3503         err = pci_set_power_state(tp->pdev, PCI_D0);
3504         if (!err) {
3505                 /* Switch out of Vaux if it is a NIC */
3506                 tg3_pwrsrc_switch_to_vmain(tp);
3507         } else {
3508                 netdev_err(tp->dev, "Transition to D0 failed\n");
3509         }
3510
3511         return err;
3512 }
3513
3514 static int tg3_setup_phy(struct tg3 *, int);
3515
3516 static int tg3_power_down_prepare(struct tg3 *tp)
3517 {
3518         u32 misc_host_ctrl;
3519         bool device_should_wake, do_low_power;
3520
3521         tg3_enable_register_access(tp);
3522
3523         /* Restore the CLKREQ setting. */
3524         if (tg3_flag(tp, CLKREQ_BUG)) {
3525                 u16 lnkctl;
3526
3527                 pci_read_config_word(tp->pdev,
3528                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3529                                      &lnkctl);
3530                 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
3531                 pci_write_config_word(tp->pdev,
3532                                       pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3533                                       lnkctl);
3534         }
3535
3536         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3537         tw32(TG3PCI_MISC_HOST_CTRL,
3538              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3539
3540         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3541                              tg3_flag(tp, WOL_ENABLE);
3542
3543         if (tg3_flag(tp, USE_PHYLIB)) {
3544                 do_low_power = false;
3545                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3546                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3547                         struct phy_device *phydev;
3548                         u32 phyid, advertising;
3549
3550                         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3551
3552                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3553
3554                         tp->link_config.orig_speed = phydev->speed;
3555                         tp->link_config.orig_duplex = phydev->duplex;
3556                         tp->link_config.orig_autoneg = phydev->autoneg;
3557                         tp->link_config.orig_advertising = phydev->advertising;
3558
3559                         advertising = ADVERTISED_TP |
3560                                       ADVERTISED_Pause |
3561                                       ADVERTISED_Autoneg |
3562                                       ADVERTISED_10baseT_Half;
3563
3564                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3565                                 if (tg3_flag(tp, WOL_SPEED_100MB))
3566                                         advertising |=
3567                                                 ADVERTISED_100baseT_Half |
3568                                                 ADVERTISED_100baseT_Full |
3569                                                 ADVERTISED_10baseT_Full;
3570                                 else
3571                                         advertising |= ADVERTISED_10baseT_Full;
3572                         }
3573
3574                         phydev->advertising = advertising;
3575
3576                         phy_start_aneg(phydev);
3577
3578                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3579                         if (phyid != PHY_ID_BCMAC131) {
3580                                 phyid &= PHY_BCM_OUI_MASK;
3581                                 if (phyid == PHY_BCM_OUI_1 ||
3582                                     phyid == PHY_BCM_OUI_2 ||
3583                                     phyid == PHY_BCM_OUI_3)
3584                                         do_low_power = true;
3585                         }
3586                 }
3587         } else {
3588                 do_low_power = true;
3589
3590                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3591                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3592                         tp->link_config.orig_speed = tp->link_config.speed;
3593                         tp->link_config.orig_duplex = tp->link_config.duplex;
3594                         tp->link_config.orig_autoneg = tp->link_config.autoneg;
3595                 }
3596
3597                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
3598                         tp->link_config.speed = SPEED_10;
3599                         tp->link_config.duplex = DUPLEX_HALF;
3600                         tp->link_config.autoneg = AUTONEG_ENABLE;
3601                         tg3_setup_phy(tp, 0);
3602                 }
3603         }
3604
3605         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3606                 u32 val;
3607
3608                 val = tr32(GRC_VCPU_EXT_CTRL);
3609                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3610         } else if (!tg3_flag(tp, ENABLE_ASF)) {
3611                 int i;
3612                 u32 val;
3613
3614                 for (i = 0; i < 200; i++) {
3615                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3616                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3617                                 break;
3618                         msleep(1);
3619                 }
3620         }
3621         if (tg3_flag(tp, WOL_CAP))
3622                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3623                                                      WOL_DRV_STATE_SHUTDOWN |
3624                                                      WOL_DRV_WOL |
3625                                                      WOL_SET_MAGIC_PKT);
3626
3627         if (device_should_wake) {
3628                 u32 mac_mode;
3629
3630                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3631                         if (do_low_power &&
3632                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3633                                 tg3_phy_auxctl_write(tp,
3634                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3635                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
3636                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3637                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3638                                 udelay(40);
3639                         }
3640
3641                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3642                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
3643                         else
3644                                 mac_mode = MAC_MODE_PORT_MODE_MII;
3645
3646                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3647                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3648                             ASIC_REV_5700) {
3649                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3650                                              SPEED_100 : SPEED_10;
3651                                 if (tg3_5700_link_polarity(tp, speed))
3652                                         mac_mode |= MAC_MODE_LINK_POLARITY;
3653                                 else
3654                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
3655                         }
3656                 } else {
3657                         mac_mode = MAC_MODE_PORT_MODE_TBI;
3658                 }
3659
3660                 if (!tg3_flag(tp, 5750_PLUS))
3661                         tw32(MAC_LED_CTRL, tp->led_ctrl);
3662
3663                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3664                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3665                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3666                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3667
3668                 if (tg3_flag(tp, ENABLE_APE))
3669                         mac_mode |= MAC_MODE_APE_TX_EN |
3670                                     MAC_MODE_APE_RX_EN |
3671                                     MAC_MODE_TDE_ENABLE;
3672
3673                 tw32_f(MAC_MODE, mac_mode);
3674                 udelay(100);
3675
3676                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3677                 udelay(10);
3678         }
3679
3680         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3681             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3682              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
3683                 u32 base_val;
3684
3685                 base_val = tp->pci_clock_ctrl;
3686                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3687                              CLOCK_CTRL_TXCLK_DISABLE);
3688
3689                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3690                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
3691         } else if (tg3_flag(tp, 5780_CLASS) ||
3692                    tg3_flag(tp, CPMU_PRESENT) ||
3693                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3694                 /* do nothing */
3695         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3696                 u32 newbits1, newbits2;
3697
3698                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3699                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3700                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3701                                     CLOCK_CTRL_TXCLK_DISABLE |
3702                                     CLOCK_CTRL_ALTCLK);
3703                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3704                 } else if (tg3_flag(tp, 5705_PLUS)) {
3705                         newbits1 = CLOCK_CTRL_625_CORE;
3706                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3707                 } else {
3708                         newbits1 = CLOCK_CTRL_ALTCLK;
3709                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3710                 }
3711
3712                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3713                             40);
3714
3715                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3716                             40);
3717
3718                 if (!tg3_flag(tp, 5705_PLUS)) {
3719                         u32 newbits3;
3720
3721                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3722                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3723                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3724                                             CLOCK_CTRL_TXCLK_DISABLE |
3725                                             CLOCK_CTRL_44MHZ_CORE);
3726                         } else {
3727                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3728                         }
3729
3730                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
3731                                     tp->pci_clock_ctrl | newbits3, 40);
3732                 }
3733         }
3734
3735         if (!device_should_wake && !tg3_flag(tp, ENABLE_ASF))
3736                 tg3_power_down_phy(tp, do_low_power);
3737
3738         tg3_frob_aux_power(tp, true);
3739
3740         /* Workaround for unstable PLL clock */
3741         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3742             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3743                 u32 val = tr32(0x7d00);
3744
3745                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3746                 tw32(0x7d00, val);
3747                 if (!tg3_flag(tp, ENABLE_ASF)) {
3748                         int err;
3749
3750                         err = tg3_nvram_lock(tp);
3751                         tg3_halt_cpu(tp, RX_CPU_BASE);
3752                         if (!err)
3753                                 tg3_nvram_unlock(tp);
3754                 }
3755         }
3756
3757         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3758
3759         return 0;
3760 }
3761
3762 static void tg3_power_down(struct tg3 *tp)
3763 {
3764         tg3_power_down_prepare(tp);
3765
3766         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3767         pci_set_power_state(tp->pdev, PCI_D3hot);
3768 }
3769
3770 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3771 {
3772         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3773         case MII_TG3_AUX_STAT_10HALF:
3774                 *speed = SPEED_10;
3775                 *duplex = DUPLEX_HALF;
3776                 break;
3777
3778         case MII_TG3_AUX_STAT_10FULL:
3779                 *speed = SPEED_10;
3780                 *duplex = DUPLEX_FULL;
3781                 break;
3782
3783         case MII_TG3_AUX_STAT_100HALF:
3784                 *speed = SPEED_100;
3785                 *duplex = DUPLEX_HALF;
3786                 break;
3787
3788         case MII_TG3_AUX_STAT_100FULL:
3789                 *speed = SPEED_100;
3790                 *duplex = DUPLEX_FULL;
3791                 break;
3792
3793         case MII_TG3_AUX_STAT_1000HALF:
3794                 *speed = SPEED_1000;
3795                 *duplex = DUPLEX_HALF;
3796                 break;
3797
3798         case MII_TG3_AUX_STAT_1000FULL:
3799                 *speed = SPEED_1000;
3800                 *duplex = DUPLEX_FULL;
3801                 break;
3802
3803         default:
3804                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3805                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3806                                  SPEED_10;
3807                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3808                                   DUPLEX_HALF;
3809                         break;
3810                 }
3811                 *speed = SPEED_INVALID;
3812                 *duplex = DUPLEX_INVALID;
3813                 break;
3814         }
3815 }
3816
3817 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3818 {
3819         int err = 0;
3820         u32 val, new_adv;
3821
3822         new_adv = ADVERTISE_CSMA;
3823         new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
3824         new_adv |= mii_advertise_flowctrl(flowctrl);
3825
3826         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3827         if (err)
3828                 goto done;
3829
3830         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3831                 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
3832
3833                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3834                     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3835                         new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3836
3837                 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3838                 if (err)
3839                         goto done;
3840         }
3841
3842         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3843                 goto done;
3844
3845         tw32(TG3_CPMU_EEE_MODE,
3846              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3847
3848         err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3849         if (!err) {
3850                 int err2;
3851
3852                 val = 0;
3853                 /* Advertise 100BASE-TX EEE ability */
3854                 if (advertise & ADVERTISED_100baseT_Full)
3855                         val |= MDIO_AN_EEE_ADV_100TX;
3856                 /* Advertise 1000BASE-T EEE ability */
3857                 if (advertise & ADVERTISED_1000baseT_Full)
3858                         val |= MDIO_AN_EEE_ADV_1000T;
3859                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3860                 if (err)
3861                         val = 0;
3862
3863                 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3864                 case ASIC_REV_5717:
3865                 case ASIC_REV_57765:
3866                 case ASIC_REV_57766:
3867                 case ASIC_REV_5719:
3868                         /* If we advertised any EEE abilities above... */
3869                         if (val)
3870                                 val = MII_TG3_DSP_TAP26_ALNOKO |
3871                                       MII_TG3_DSP_TAP26_RMRXSTO |
3872                                       MII_TG3_DSP_TAP26_OPCSINPT;
3873                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3874                         /* Fall through */
3875                 case ASIC_REV_5720:
3876                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3877                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3878                                                  MII_TG3_DSP_CH34TP2_HIBW01);
3879                 }
3880
3881                 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3882                 if (!err)
3883                         err = err2;
3884         }
3885
3886 done:
3887         return err;
3888 }
3889
3890 static void tg3_phy_copper_begin(struct tg3 *tp)
3891 {
3892         u32 new_adv;
3893         int i;
3894
3895         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3896                 new_adv = ADVERTISED_10baseT_Half |
3897                           ADVERTISED_10baseT_Full;
3898                 if (tg3_flag(tp, WOL_SPEED_100MB))
3899                         new_adv |= ADVERTISED_100baseT_Half |
3900                                    ADVERTISED_100baseT_Full;
3901
3902                 tg3_phy_autoneg_cfg(tp, new_adv,
3903                                     FLOW_CTRL_TX | FLOW_CTRL_RX);
3904         } else if (tp->link_config.speed == SPEED_INVALID) {
3905                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3906                         tp->link_config.advertising &=
3907                                 ~(ADVERTISED_1000baseT_Half |
3908                                   ADVERTISED_1000baseT_Full);
3909
3910                 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
3911                                     tp->link_config.flowctrl);
3912         } else {
3913                 /* Asking for a specific link mode. */
3914                 if (tp->link_config.speed == SPEED_1000) {
3915                         if (tp->link_config.duplex == DUPLEX_FULL)
3916                                 new_adv = ADVERTISED_1000baseT_Full;
3917                         else
3918                                 new_adv = ADVERTISED_1000baseT_Half;
3919                 } else if (tp->link_config.speed == SPEED_100) {
3920                         if (tp->link_config.duplex == DUPLEX_FULL)
3921                                 new_adv = ADVERTISED_100baseT_Full;
3922                         else
3923                                 new_adv = ADVERTISED_100baseT_Half;
3924                 } else {
3925                         if (tp->link_config.duplex == DUPLEX_FULL)
3926                                 new_adv = ADVERTISED_10baseT_Full;
3927                         else
3928                                 new_adv = ADVERTISED_10baseT_Half;
3929                 }
3930
3931                 tg3_phy_autoneg_cfg(tp, new_adv,
3932                                     tp->link_config.flowctrl);
3933         }
3934
3935         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3936             tp->link_config.speed != SPEED_INVALID) {
3937                 u32 bmcr, orig_bmcr;
3938
3939                 tp->link_config.active_speed = tp->link_config.speed;
3940                 tp->link_config.active_duplex = tp->link_config.duplex;
3941
3942                 bmcr = 0;
3943                 switch (tp->link_config.speed) {
3944                 default:
3945                 case SPEED_10:
3946                         break;
3947
3948                 case SPEED_100:
3949                         bmcr |= BMCR_SPEED100;
3950                         break;
3951
3952                 case SPEED_1000:
3953                         bmcr |= BMCR_SPEED1000;
3954                         break;
3955                 }
3956
3957                 if (tp->link_config.duplex == DUPLEX_FULL)
3958                         bmcr |= BMCR_FULLDPLX;
3959
3960                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3961                     (bmcr != orig_bmcr)) {
3962                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3963                         for (i = 0; i < 1500; i++) {
3964                                 u32 tmp;
3965
3966                                 udelay(10);
3967                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3968                                     tg3_readphy(tp, MII_BMSR, &tmp))
3969                                         continue;
3970                                 if (!(tmp & BMSR_LSTATUS)) {
3971                                         udelay(40);
3972                                         break;
3973                                 }
3974                         }
3975                         tg3_writephy(tp, MII_BMCR, bmcr);
3976                         udelay(40);
3977                 }
3978         } else {
3979                 tg3_writephy(tp, MII_BMCR,
3980                              BMCR_ANENABLE | BMCR_ANRESTART);
3981         }
3982 }
3983
3984 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3985 {
3986         int err;
3987
3988         /* Turn off tap power management. */
3989         /* Set Extended packet length bit */
3990         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3991
3992         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3993         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3994         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3995         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3996         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3997
3998         udelay(40);
3999
4000         return err;
4001 }
4002
4003 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4004 {
4005         u32 advmsk, tgtadv, advertising;
4006
4007         advertising = tp->link_config.advertising;
4008         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4009
4010         advmsk = ADVERTISE_ALL;
4011         if (tp->link_config.active_duplex == DUPLEX_FULL) {
4012                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4013                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4014         }
4015
4016         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4017                 return false;
4018
4019         if ((*lcladv & advmsk) != tgtadv)
4020                 return false;
4021
4022         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4023                 u32 tg3_ctrl;
4024
4025                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4026
4027                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4028                         return false;
4029
4030                 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4031                 if (tg3_ctrl != tgtadv)
4032                         return false;
4033         }
4034
4035         return true;
4036 }
4037
4038 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4039 {
4040         u32 lpeth = 0;
4041
4042         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4043                 u32 val;
4044
4045                 if (tg3_readphy(tp, MII_STAT1000, &val))
4046                         return false;
4047
4048                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4049         }
4050
4051         if (tg3_readphy(tp, MII_LPA, rmtadv))
4052                 return false;
4053
4054         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4055         tp->link_config.rmt_adv = lpeth;
4056
4057         return true;
4058 }
4059
4060 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4061 {
4062         int current_link_up;
4063         u32 bmsr, val;
4064         u32 lcl_adv, rmt_adv;
4065         u16 current_speed;
4066         u8 current_duplex;
4067         int i, err;
4068
4069         tw32(MAC_EVENT, 0);
4070
4071         tw32_f(MAC_STATUS,
4072              (MAC_STATUS_SYNC_CHANGED |
4073               MAC_STATUS_CFG_CHANGED |
4074               MAC_STATUS_MI_COMPLETION |
4075               MAC_STATUS_LNKSTATE_CHANGED));
4076         udelay(40);
4077
4078         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4079                 tw32_f(MAC_MI_MODE,
4080                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4081                 udelay(80);
4082         }
4083
4084         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4085
4086         /* Some third-party PHYs need to be reset on link going
4087          * down.
4088          */
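        /* MII_BMSR latches link-down events until read, so the
         * back-to-back reads here (and elsewhere in this function)
         * discard the stale latched value and act on current status --
         * standard latched-low MII semantics.
         */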
4089         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
4090              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
4091              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
4092             netif_carrier_ok(tp->dev)) {
4093                 tg3_readphy(tp, MII_BMSR, &bmsr);
4094                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4095                     !(bmsr & BMSR_LSTATUS))
4096                         force_reset = 1;
4097         }
4098         if (force_reset)
4099                 tg3_phy_reset(tp);
4100
4101         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4102                 tg3_readphy(tp, MII_BMSR, &bmsr);
4103                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4104                     !tg3_flag(tp, INIT_COMPLETE))
4105                         bmsr = 0;
4106
4107                 if (!(bmsr & BMSR_LSTATUS)) {
4108                         err = tg3_init_5401phy_dsp(tp);
4109                         if (err)
4110                                 return err;
4111
4112                         tg3_readphy(tp, MII_BMSR, &bmsr);
4113                         for (i = 0; i < 1000; i++) {
4114                                 udelay(10);
4115                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4116                                     (bmsr & BMSR_LSTATUS)) {
4117                                         udelay(40);
4118                                         break;
4119                                 }
4120                         }
4121
4122                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4123                             TG3_PHY_REV_BCM5401_B0 &&
4124                             !(bmsr & BMSR_LSTATUS) &&
4125                             tp->link_config.active_speed == SPEED_1000) {
4126                                 err = tg3_phy_reset(tp);
4127                                 if (!err)
4128                                         err = tg3_init_5401phy_dsp(tp);
4129                                 if (err)
4130                                         return err;
4131                         }
4132                 }
4133         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4134                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
4135                 /* 5701 {A0,B0} CRC bug workaround */
4136                 tg3_writephy(tp, 0x15, 0x0a75);
4137                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4138                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4139                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4140         }
4141
4142         /* Clear pending interrupts... */
4143         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4144         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4145
4146         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4147                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4148         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4149                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4150
4151         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
4152             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
4153                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4154                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
4155                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4156                 else
4157                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4158         }
4159
4160         current_link_up = 0;
4161         current_speed = SPEED_INVALID;
4162         current_duplex = DUPLEX_INVALID;
4163         tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4164         tp->link_config.rmt_adv = 0;
4165
4166         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4167                 err = tg3_phy_auxctl_read(tp,
4168                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4169                                           &val);
4170                 if (!err && !(val & (1 << 10))) {
4171                         tg3_phy_auxctl_write(tp,
4172                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4173                                              val | (1 << 10));
4174                         goto relink;
4175                 }
4176         }
4177
4178         bmsr = 0;
4179         for (i = 0; i < 100; i++) {
4180                 tg3_readphy(tp, MII_BMSR, &bmsr);
4181                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4182                     (bmsr & BMSR_LSTATUS))
4183                         break;
4184                 udelay(40);
4185         }
4186
4187         if (bmsr & BMSR_LSTATUS) {
4188                 u32 aux_stat, bmcr;
4189
4190                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4191                 for (i = 0; i < 2000; i++) {
4192                         udelay(10);
4193                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4194                             aux_stat)
4195                                 break;
4196                 }
4197
4198                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4199                                              &current_speed,
4200                                              &current_duplex);
4201
4202                 bmcr = 0;
4203                 for (i = 0; i < 200; i++) {
4204                         tg3_readphy(tp, MII_BMCR, &bmcr);
4205                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4206                                 continue;
4207                         if (bmcr && bmcr != 0x7fff)
4208                                 break;
4209                         udelay(10);
4210                 }
4211
4212                 lcl_adv = 0;
4213                 rmt_adv = 0;
4214
4215                 tp->link_config.active_speed = current_speed;
4216                 tp->link_config.active_duplex = current_duplex;
4217
4218                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4219                         if ((bmcr & BMCR_ANENABLE) &&
4220                             tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4221                             tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4222                                 current_link_up = 1;
4223                 } else {
4224                         if (!(bmcr & BMCR_ANENABLE) &&
4225                             tp->link_config.speed == current_speed &&
4226                             tp->link_config.duplex == current_duplex &&
4227                             tp->link_config.flowctrl ==
4228                             tp->link_config.active_flowctrl) {
4229                                 current_link_up = 1;
4230                         }
4231                 }
4232
4233                 if (current_link_up == 1 &&
4234                     tp->link_config.active_duplex == DUPLEX_FULL) {
4235                         u32 reg, bit;
4236
4237                         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4238                                 reg = MII_TG3_FET_GEN_STAT;
4239                                 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4240                         } else {
4241                                 reg = MII_TG3_EXT_STAT;
4242                                 bit = MII_TG3_EXT_STAT_MDIX;
4243                         }
4244
4245                         if (!tg3_readphy(tp, reg, &val) && (val & bit))
4246                                 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4247
4248                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4249                 }
4250         }
4251
4252 relink:
4253         if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4254                 tg3_phy_copper_begin(tp);
4255
4256                 tg3_readphy(tp, MII_BMSR, &bmsr);
4257                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4258                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4259                         current_link_up = 1;
4260         }
4261
4262         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4263         if (current_link_up == 1) {
4264                 if (tp->link_config.active_speed == SPEED_100 ||
4265                     tp->link_config.active_speed == SPEED_10)
4266                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4267                 else
4268                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4269         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4270                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4271         else
4272                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4273
4274         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4275         if (tp->link_config.active_duplex == DUPLEX_HALF)
4276                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4277
4278         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
4279                 if (current_link_up == 1 &&
4280                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4281                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4282                 else
4283                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4284         }
4285
4286         /* Without this setting the Netgear GA302T PHY does not
4287          * send/receive packets (reason unknown).
4288          */
4289         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4290             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4291                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4292                 tw32_f(MAC_MI_MODE, tp->mi_mode);
4293                 udelay(80);
4294         }
4295
4296         tw32_f(MAC_MODE, tp->mac_mode);
4297         udelay(40);
4298
4299         tg3_phy_eee_adjust(tp, current_link_up);
4300
4301         if (tg3_flag(tp, USE_LINKCHG_REG)) {
4302                 /* Polled via timer. */
4303                 tw32_f(MAC_EVENT, 0);
4304         } else {
4305                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4306         }
4307         udelay(40);
4308
4309         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4310             current_link_up == 1 &&
4311             tp->link_config.active_speed == SPEED_1000 &&
4312             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4313                 udelay(120);
4314                 tw32_f(MAC_STATUS,
4315                      (MAC_STATUS_SYNC_CHANGED |
4316                       MAC_STATUS_CFG_CHANGED));
4317                 udelay(40);
4318                 tg3_write_mem(tp,
4319                               NIC_SRAM_FIRMWARE_MBOX,
4320                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4321         }
4322
4323         /* Prevent send BD corruption: CLKREQ must be disabled at 10/100Mbps. */
4324         if (tg3_flag(tp, CLKREQ_BUG)) {
4325                 u16 oldlnkctl, newlnkctl;
4326
4327                 pci_read_config_word(tp->pdev,
4328                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
4329                                      &oldlnkctl);
4330                 if (tp->link_config.active_speed == SPEED_100 ||
4331                     tp->link_config.active_speed == SPEED_10)
4332                         newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
4333                 else
4334                         newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
4335                 if (newlnkctl != oldlnkctl)
4336                         pci_write_config_word(tp->pdev,
4337                                               pci_pcie_cap(tp->pdev) +
4338                                               PCI_EXP_LNKCTL, newlnkctl);
4339         }
4340
4341         if (current_link_up != netif_carrier_ok(tp->dev)) {
4342                 if (current_link_up)
4343                         netif_carrier_on(tp->dev);
4344                 else
4345                         netif_carrier_off(tp->dev);
4346                 tg3_link_report(tp);
4347         }
4348
4349         return 0;
4350 }
4351
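/* Software state machine for IEEE 802.3 clause 37 (1000BASE-X)
 * autonegotiation, driven through the MAC's TX/RX config-word
 * registers.  The MR_* flag names below follow the clause 37
 * management variables.
 */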
4352 struct tg3_fiber_aneginfo {
4353         int state;
4354 #define ANEG_STATE_UNKNOWN              0
4355 #define ANEG_STATE_AN_ENABLE            1
4356 #define ANEG_STATE_RESTART_INIT         2
4357 #define ANEG_STATE_RESTART              3
4358 #define ANEG_STATE_DISABLE_LINK_OK      4
4359 #define ANEG_STATE_ABILITY_DETECT_INIT  5
4360 #define ANEG_STATE_ABILITY_DETECT       6
4361 #define ANEG_STATE_ACK_DETECT_INIT      7
4362 #define ANEG_STATE_ACK_DETECT           8
4363 #define ANEG_STATE_COMPLETE_ACK_INIT    9
4364 #define ANEG_STATE_COMPLETE_ACK         10
4365 #define ANEG_STATE_IDLE_DETECT_INIT     11
4366 #define ANEG_STATE_IDLE_DETECT          12
4367 #define ANEG_STATE_LINK_OK              13
4368 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
4369 #define ANEG_STATE_NEXT_PAGE_WAIT       15
4370
4371         u32 flags;
4372 #define MR_AN_ENABLE            0x00000001
4373 #define MR_RESTART_AN           0x00000002
4374 #define MR_AN_COMPLETE          0x00000004
4375 #define MR_PAGE_RX              0x00000008
4376 #define MR_NP_LOADED            0x00000010
4377 #define MR_TOGGLE_TX            0x00000020
4378 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
4379 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
4380 #define MR_LP_ADV_SYM_PAUSE     0x00000100
4381 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
4382 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4383 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4384 #define MR_LP_ADV_NEXT_PAGE     0x00001000
4385 #define MR_TOGGLE_RX            0x00002000
4386 #define MR_NP_RX                0x00004000
4387
4388 #define MR_LINK_OK              0x80000000
4389
4390         unsigned long link_time, cur_time;
4391
4392         u32 ability_match_cfg;
4393         int ability_match_count;
4394
4395         char ability_match, idle_match, ack_match;
4396
4397         u32 txconfig, rxconfig;
4398 #define ANEG_CFG_NP             0x00000080
4399 #define ANEG_CFG_ACK            0x00000040
4400 #define ANEG_CFG_RF2            0x00000020
4401 #define ANEG_CFG_RF1            0x00000010
4402 #define ANEG_CFG_PS2            0x00000001
4403 #define ANEG_CFG_PS1            0x00008000
4404 #define ANEG_CFG_HD             0x00004000
4405 #define ANEG_CFG_FD             0x00002000
4406 #define ANEG_CFG_INVAL          0x00001f06
4407
4408 };
4409 #define ANEG_OK         0
4410 #define ANEG_DONE       1
4411 #define ANEG_TIMER_ENAB 2
4412 #define ANEG_FAILED     -1
4413
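/* The state machine is clocked by fiber_autoneg()'s ~1us poll loop
 * (ap->cur_time advances once per call), so 10000 ticks approximates
 * the 10ms link_timer of clause 37.
 */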
4414 #define ANEG_STATE_SETTLE_TIME  10000
4415
4416 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4417                                    struct tg3_fiber_aneginfo *ap)
4418 {
4419         u16 flowctrl;
4420         unsigned long delta;
4421         u32 rx_cfg_reg;
4422         int ret;
4423
4424         if (ap->state == ANEG_STATE_UNKNOWN) {
4425                 ap->rxconfig = 0;
4426                 ap->link_time = 0;
4427                 ap->cur_time = 0;
4428                 ap->ability_match_cfg = 0;
4429                 ap->ability_match_count = 0;
4430                 ap->ability_match = 0;
4431                 ap->idle_match = 0;
4432                 ap->ack_match = 0;
4433         }
4434         ap->cur_time++;
4435
4436         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4437                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4438
4439                 if (rx_cfg_reg != ap->ability_match_cfg) {
4440                         ap->ability_match_cfg = rx_cfg_reg;
4441                         ap->ability_match = 0;
4442                         ap->ability_match_count = 0;
4443                 } else {
4444                         if (++ap->ability_match_count > 1) {
4445                                 ap->ability_match = 1;
4446                                 ap->ability_match_cfg = rx_cfg_reg;
4447                         }
4448                 }
4449                 if (rx_cfg_reg & ANEG_CFG_ACK)
4450                         ap->ack_match = 1;
4451                 else
4452                         ap->ack_match = 0;
4453
4454                 ap->idle_match = 0;
4455         } else {
4456                 ap->idle_match = 1;
4457                 ap->ability_match_cfg = 0;
4458                 ap->ability_match_count = 0;
4459                 ap->ability_match = 0;
4460                 ap->ack_match = 0;
4461
4462                 rx_cfg_reg = 0;
4463         }
4464
4465         ap->rxconfig = rx_cfg_reg;
4466         ret = ANEG_OK;
4467
4468         switch (ap->state) {
4469         case ANEG_STATE_UNKNOWN:
4470                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4471                         ap->state = ANEG_STATE_AN_ENABLE;
4472
4473                 /* fallthru */
4474         case ANEG_STATE_AN_ENABLE:
4475                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4476                 if (ap->flags & MR_AN_ENABLE) {
4477                         ap->link_time = 0;
4478                         ap->cur_time = 0;
4479                         ap->ability_match_cfg = 0;
4480                         ap->ability_match_count = 0;
4481                         ap->ability_match = 0;
4482                         ap->idle_match = 0;
4483                         ap->ack_match = 0;
4484
4485                         ap->state = ANEG_STATE_RESTART_INIT;
4486                 } else {
4487                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
4488                 }
4489                 break;
4490
4491         case ANEG_STATE_RESTART_INIT:
4492                 ap->link_time = ap->cur_time;
4493                 ap->flags &= ~(MR_NP_LOADED);
4494                 ap->txconfig = 0;
4495                 tw32(MAC_TX_AUTO_NEG, 0);
4496                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4497                 tw32_f(MAC_MODE, tp->mac_mode);
4498                 udelay(40);
4499
4500                 ret = ANEG_TIMER_ENAB;
4501                 ap->state = ANEG_STATE_RESTART;
4502
4503                 /* fallthru */
4504         case ANEG_STATE_RESTART:
4505                 delta = ap->cur_time - ap->link_time;
4506                 if (delta > ANEG_STATE_SETTLE_TIME)
4507                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4508                 else
4509                         ret = ANEG_TIMER_ENAB;
4510                 break;
4511
4512         case ANEG_STATE_DISABLE_LINK_OK:
4513                 ret = ANEG_DONE;
4514                 break;
4515
4516         case ANEG_STATE_ABILITY_DETECT_INIT:
4517                 ap->flags &= ~(MR_TOGGLE_TX);
4518                 ap->txconfig = ANEG_CFG_FD;
4519                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4520                 if (flowctrl & ADVERTISE_1000XPAUSE)
4521                         ap->txconfig |= ANEG_CFG_PS1;
4522                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4523                         ap->txconfig |= ANEG_CFG_PS2;
4524                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4525                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4526                 tw32_f(MAC_MODE, tp->mac_mode);
4527                 udelay(40);
4528
4529                 ap->state = ANEG_STATE_ABILITY_DETECT;
4530                 break;
4531
4532         case ANEG_STATE_ABILITY_DETECT:
4533                 if (ap->ability_match != 0 && ap->rxconfig != 0)
4534                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
4535                 break;
4536
4537         case ANEG_STATE_ACK_DETECT_INIT:
4538                 ap->txconfig |= ANEG_CFG_ACK;
4539                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4540                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4541                 tw32_f(MAC_MODE, tp->mac_mode);
4542                 udelay(40);
4543
4544                 ap->state = ANEG_STATE_ACK_DETECT;
4545
4546                 /* fallthru */
4547         case ANEG_STATE_ACK_DETECT:
4548                 if (ap->ack_match != 0) {
4549                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4550                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4551                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4552                         } else {
4553                                 ap->state = ANEG_STATE_AN_ENABLE;
4554                         }
4555                 } else if (ap->ability_match != 0 &&
4556                            ap->rxconfig == 0) {
4557                         ap->state = ANEG_STATE_AN_ENABLE;
4558                 }
4559                 break;
4560
4561         case ANEG_STATE_COMPLETE_ACK_INIT:
4562                 if (ap->rxconfig & ANEG_CFG_INVAL) {
4563                         ret = ANEG_FAILED;
4564                         break;
4565                 }
4566                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4567                                MR_LP_ADV_HALF_DUPLEX |
4568                                MR_LP_ADV_SYM_PAUSE |
4569                                MR_LP_ADV_ASYM_PAUSE |
4570                                MR_LP_ADV_REMOTE_FAULT1 |
4571                                MR_LP_ADV_REMOTE_FAULT2 |
4572                                MR_LP_ADV_NEXT_PAGE |
4573                                MR_TOGGLE_RX |
4574                                MR_NP_RX);
4575                 if (ap->rxconfig & ANEG_CFG_FD)
4576                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4577                 if (ap->rxconfig & ANEG_CFG_HD)
4578                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4579                 if (ap->rxconfig & ANEG_CFG_PS1)
4580                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
4581                 if (ap->rxconfig & ANEG_CFG_PS2)
4582                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4583                 if (ap->rxconfig & ANEG_CFG_RF1)
4584                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4585                 if (ap->rxconfig & ANEG_CFG_RF2)
4586                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4587                 if (ap->rxconfig & ANEG_CFG_NP)
4588                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
4589
4590                 ap->link_time = ap->cur_time;
4591
4592                 ap->flags ^= (MR_TOGGLE_TX);
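                /* 0x0008 has no ANEG_CFG_* define; judging by the
                 * MR_TOGGLE_RX flag it feeds, it is presumably the
                 * toggle bit of the received config word.
                 */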
4593                 if (ap->rxconfig & 0x0008)
4594                         ap->flags |= MR_TOGGLE_RX;
4595                 if (ap->rxconfig & ANEG_CFG_NP)
4596                         ap->flags |= MR_NP_RX;
4597                 ap->flags |= MR_PAGE_RX;
4598
4599                 ap->state = ANEG_STATE_COMPLETE_ACK;
4600                 ret = ANEG_TIMER_ENAB;
4601                 break;
4602
4603         case ANEG_STATE_COMPLETE_ACK:
4604                 if (ap->ability_match != 0 &&
4605                     ap->rxconfig == 0) {
4606                         ap->state = ANEG_STATE_AN_ENABLE;
4607                         break;
4608                 }
4609                 delta = ap->cur_time - ap->link_time;
4610                 if (delta > ANEG_STATE_SETTLE_TIME) {
4611                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4612                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4613                         } else {
4614                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4615                                     !(ap->flags & MR_NP_RX)) {
4616                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4617                                 } else {
4618                                         ret = ANEG_FAILED;
4619                                 }
4620                         }
4621                 }
4622                 break;
4623
4624         case ANEG_STATE_IDLE_DETECT_INIT:
4625                 ap->link_time = ap->cur_time;
4626                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4627                 tw32_f(MAC_MODE, tp->mac_mode);
4628                 udelay(40);
4629
4630                 ap->state = ANEG_STATE_IDLE_DETECT;
4631                 ret = ANEG_TIMER_ENAB;
4632                 break;
4633
4634         case ANEG_STATE_IDLE_DETECT:
4635                 if (ap->ability_match != 0 &&
4636                     ap->rxconfig == 0) {
4637                         ap->state = ANEG_STATE_AN_ENABLE;
4638                         break;
4639                 }
4640                 delta = ap->cur_time - ap->link_time;
4641                 if (delta > ANEG_STATE_SETTLE_TIME) {
4642                         /* XXX another gem from the Broadcom driver :( */
4643                         ap->state = ANEG_STATE_LINK_OK;
4644                 }
4645                 break;
4646
4647         case ANEG_STATE_LINK_OK:
4648                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4649                 ret = ANEG_DONE;
4650                 break;
4651
4652         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4653                 /* ??? unimplemented */
4654                 break;
4655
4656         case ANEG_STATE_NEXT_PAGE_WAIT:
4657                 /* ??? unimplemented */
4658                 break;
4659
4660         default:
4661                 ret = ANEG_FAILED;
4662                 break;
4663         }
4664
4665         return ret;
4666 }
4667
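/* Run the clause 37 state machine to completion, polling about once
 * per microsecond for up to ~195ms.  Returns nonzero if autoneg
 * finished with any of AN-complete, link-OK, or a full-duplex-capable
 * partner flagged.
 */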
4668 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4669 {
4670         int res = 0;
4671         struct tg3_fiber_aneginfo aninfo;
4672         int status = ANEG_FAILED;
4673         unsigned int tick;
4674         u32 tmp;
4675
4676         tw32_f(MAC_TX_AUTO_NEG, 0);
4677
4678         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4679         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4680         udelay(40);
4681
4682         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4683         udelay(40);
4684
4685         memset(&aninfo, 0, sizeof(aninfo));
4686         aninfo.flags |= MR_AN_ENABLE;
4687         aninfo.state = ANEG_STATE_UNKNOWN;
4688         aninfo.cur_time = 0;
4689         tick = 0;
4690         while (++tick < 195000) {
4691                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4692                 if (status == ANEG_DONE || status == ANEG_FAILED)
4693                         break;
4694
4695                 udelay(1);
4696         }
4697
4698         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4699         tw32_f(MAC_MODE, tp->mac_mode);
4700         udelay(40);
4701
4702         *txflags = aninfo.txconfig;
4703         *rxflags = aninfo.flags;
4704
4705         if (status == ANEG_DONE &&
4706             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4707                              MR_LP_ADV_FULL_DUPLEX)))
4708                 res = 1;
4709
4710         return res;
4711 }
4712
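/* Bring-up sequence for the BCM8002 SerDes PHY.  The writes below hit
 * vendor-specific registers; each step is described by the comment
 * beside it.
 */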
4713 static void tg3_init_bcm8002(struct tg3 *tp)
4714 {
4715         u32 mac_status = tr32(MAC_STATUS);
4716         int i;
4717
4718         /* Reset on first-time init or when we have a link. */
4719         if (tg3_flag(tp, INIT_COMPLETE) &&
4720             !(mac_status & MAC_STATUS_PCS_SYNCED))
4721                 return;
4722
4723         /* Set PLL lock range. */
4724         tg3_writephy(tp, 0x16, 0x8007);
4725
4726         /* SW reset */
4727         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4728
4729         /* Wait for reset to complete. */
4730         /* XXX schedule_timeout() ... */
4731         for (i = 0; i < 500; i++)
4732                 udelay(10);
4733
4734         /* Config mode; select PMA/Ch 1 regs. */
4735         tg3_writephy(tp, 0x10, 0x8411);
4736
4737         /* Enable auto-lock and comdet, select txclk for tx. */
4738         tg3_writephy(tp, 0x11, 0x0a10);
4739
4740         tg3_writephy(tp, 0x18, 0x00a0);
4741         tg3_writephy(tp, 0x16, 0x41ff);
4742
4743         /* Assert and deassert POR. */
4744         tg3_writephy(tp, 0x13, 0x0400);
4745         udelay(40);
4746         tg3_writephy(tp, 0x13, 0x0000);
4747
4748         tg3_writephy(tp, 0x11, 0x0a50);
4749         udelay(40);
4750         tg3_writephy(tp, 0x11, 0x0a10);
4751
4752         /* Wait for signal to stabilize */
4753         /* XXX schedule_timeout() ... */
4754         for (i = 0; i < 15000; i++)
4755                 udelay(10);
4756
4757         /* Deselect the channel register so we can read the PHYID
4758          * later.
4759          */
4760         tg3_writephy(tp, 0x10, 0x8011);
4761 }
4762
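/* Fiber link setup using the hardware autoneg engine (the SG_DIG
 * block).  Falls back to parallel detection when the link partner is
 * not sending config code words.
 */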
4763 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4764 {
4765         u16 flowctrl;
4766         u32 sg_dig_ctrl, sg_dig_status;
4767         u32 serdes_cfg, expected_sg_dig_ctrl;
4768         int workaround, port_a;
4769         int current_link_up;
4770
4771         serdes_cfg = 0;
4772         expected_sg_dig_ctrl = 0;
4773         workaround = 0;
4774         port_a = 1;
4775         current_link_up = 0;
4776
4777         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4778             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4779                 workaround = 1;
4780                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4781                         port_a = 0;
4782
4783                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4784                 /* preserve bits 20-23 for voltage regulator */
4785                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4786         }
4787
4788         sg_dig_ctrl = tr32(SG_DIG_CTRL);
4789
4790         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4791                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4792                         if (workaround) {
4793                                 u32 val = serdes_cfg;
4794
4795                                 if (port_a)
4796                                         val |= 0xc010000;
4797                                 else
4798                                         val |= 0x4010000;
4799                                 tw32_f(MAC_SERDES_CFG, val);
4800                         }
4801
4802                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4803                 }
4804                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4805                         tg3_setup_flow_control(tp, 0, 0);
4806                         current_link_up = 1;
4807                 }
4808                 goto out;
4809         }
4810
4811         /* Want auto-negotiation.  */
4812         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4813
4814         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4815         if (flowctrl & ADVERTISE_1000XPAUSE)
4816                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4817         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4818                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4819
4820         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4821                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4822                     tp->serdes_counter &&
4823                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
4824                                     MAC_STATUS_RCVD_CFG)) ==
4825                      MAC_STATUS_PCS_SYNCED)) {
4826                         tp->serdes_counter--;
4827                         current_link_up = 1;
4828                         goto out;
4829                 }
4830 restart_autoneg:
4831                 if (workaround)
4832                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4833                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4834                 udelay(5);
4835                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4836
4837                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4838                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4839         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4840                                  MAC_STATUS_SIGNAL_DET)) {
4841                 sg_dig_status = tr32(SG_DIG_STATUS);
4842                 mac_status = tr32(MAC_STATUS);
4843
4844                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4845                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
4846                         u32 local_adv = 0, remote_adv = 0;
4847
4848                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4849                                 local_adv |= ADVERTISE_1000XPAUSE;
4850                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4851                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4852
4853                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4854                                 remote_adv |= LPA_1000XPAUSE;
4855                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4856                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4857
4858                         tp->link_config.rmt_adv =
4859                                            mii_adv_to_ethtool_adv_x(remote_adv);
4860
4861                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4862                         current_link_up = 1;
4863                         tp->serdes_counter = 0;
4864                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4865                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4866                         if (tp->serdes_counter)
4867                                 tp->serdes_counter--;
4868                         else {
4869                                 if (workaround) {
4870                                         u32 val = serdes_cfg;
4871
4872                                         if (port_a)
4873                                                 val |= 0xc010000;
4874                                         else
4875                                                 val |= 0x4010000;
4876
4877                                         tw32_f(MAC_SERDES_CFG, val);
4878                                 }
4879
4880                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4881                                 udelay(40);
4882
4883                                 /* Link parallel detection - link is up
4884                                  * only if we have PCS_SYNC and are not
4885                                  * receiving config code words. */
4886                                 mac_status = tr32(MAC_STATUS);
4887                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4888                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
4889                                         tg3_setup_flow_control(tp, 0, 0);
4890                                         current_link_up = 1;
4891                                         tp->phy_flags |=
4892                                                 TG3_PHYFLG_PARALLEL_DETECT;
4893                                         tp->serdes_counter =
4894                                                 SERDES_PARALLEL_DET_TIMEOUT;
4895                                 } else
4896                                         goto restart_autoneg;
4897                         }
4898                 }
4899         } else {
4900                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4901                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4902         }
4903
4904 out:
4905         return current_link_up;
4906 }
4907
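/* Fiber link setup for parts without the hardware autoneg engine:
 * run the software clause 37 state machine above, or simply force a
 * 1000FD link when autoneg is disabled.
 */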
4908 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4909 {
4910         int current_link_up = 0;
4911
4912         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4913                 goto out;
4914
4915         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4916                 u32 txflags, rxflags;
4917                 int i;
4918
4919                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4920                         u32 local_adv = 0, remote_adv = 0;
4921
4922                         if (txflags & ANEG_CFG_PS1)
4923                                 local_adv |= ADVERTISE_1000XPAUSE;
4924                         if (txflags & ANEG_CFG_PS2)
4925                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4926
4927                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
4928                                 remote_adv |= LPA_1000XPAUSE;
4929                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4930                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4931
4932                         tp->link_config.rmt_adv =
4933                                            mii_adv_to_ethtool_adv_x(remote_adv);
4934
4935                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4936
4937                         current_link_up = 1;
4938                 }
4939                 for (i = 0; i < 30; i++) {
4940                         udelay(20);
4941                         tw32_f(MAC_STATUS,
4942                                (MAC_STATUS_SYNC_CHANGED |
4943                                 MAC_STATUS_CFG_CHANGED));
4944                         udelay(40);
4945                         if ((tr32(MAC_STATUS) &
4946                              (MAC_STATUS_SYNC_CHANGED |
4947                               MAC_STATUS_CFG_CHANGED)) == 0)
4948                                 break;
4949                 }
4950
4951                 mac_status = tr32(MAC_STATUS);
4952                 if (current_link_up == 0 &&
4953                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
4954                     !(mac_status & MAC_STATUS_RCVD_CFG))
4955                         current_link_up = 1;
4956         } else {
4957                 tg3_setup_flow_control(tp, 0, 0);
4958
4959                 /* Forcing 1000FD link up. */
4960                 current_link_up = 1;
4961
4962                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4963                 udelay(40);
4964
4965                 tw32_f(MAC_MODE, tp->mac_mode);
4966                 udelay(40);
4967         }
4968
4969 out:
4970         return current_link_up;
4971 }
4972
4973 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4974 {
4975         u32 orig_pause_cfg;
4976         u16 orig_active_speed;
4977         u8 orig_active_duplex;
4978         u32 mac_status;
4979         int current_link_up;
4980         int i;
4981
4982         orig_pause_cfg = tp->link_config.active_flowctrl;
4983         orig_active_speed = tp->link_config.active_speed;
4984         orig_active_duplex = tp->link_config.active_duplex;
4985
4986         if (!tg3_flag(tp, HW_AUTONEG) &&
4987             netif_carrier_ok(tp->dev) &&
4988             tg3_flag(tp, INIT_COMPLETE)) {
4989                 mac_status = tr32(MAC_STATUS);
4990                 mac_status &= (MAC_STATUS_PCS_SYNCED |
4991                                MAC_STATUS_SIGNAL_DET |
4992                                MAC_STATUS_CFG_CHANGED |
4993                                MAC_STATUS_RCVD_CFG);
4994                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4995                                    MAC_STATUS_SIGNAL_DET)) {
4996                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4997                                             MAC_STATUS_CFG_CHANGED));
4998                         return 0;
4999                 }
5000         }
5001
5002         tw32_f(MAC_TX_AUTO_NEG, 0);
5003
5004         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5005         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5006         tw32_f(MAC_MODE, tp->mac_mode);
5007         udelay(40);
5008
5009         if (tp->phy_id == TG3_PHY_ID_BCM8002)
5010                 tg3_init_bcm8002(tp);
5011
5012         /* Enable link change event even when serdes polling.  */
5013         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5014         udelay(40);
5015
5016         current_link_up = 0;
5017         tp->link_config.rmt_adv = 0;
5018         mac_status = tr32(MAC_STATUS);
5019
5020         if (tg3_flag(tp, HW_AUTONEG))
5021                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5022         else
5023                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5024
5025         tp->napi[0].hw_status->status =
5026                 (SD_STATUS_UPDATED |
5027                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5028
5029         for (i = 0; i < 100; i++) {
5030                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5031                                     MAC_STATUS_CFG_CHANGED));
5032                 udelay(5);
5033                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5034                                          MAC_STATUS_CFG_CHANGED |
5035                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5036                         break;
5037         }
5038
5039         mac_status = tr32(MAC_STATUS);
5040         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5041                 current_link_up = 0;
5042                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5043                     tp->serdes_counter == 0) {
5044                         tw32_f(MAC_MODE, (tp->mac_mode |
5045                                           MAC_MODE_SEND_CONFIGS));
5046                         udelay(1);
5047                         tw32_f(MAC_MODE, tp->mac_mode);
5048                 }
5049         }
5050
5051         if (current_link_up == 1) {
5052                 tp->link_config.active_speed = SPEED_1000;
5053                 tp->link_config.active_duplex = DUPLEX_FULL;
5054                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5055                                     LED_CTRL_LNKLED_OVERRIDE |
5056                                     LED_CTRL_1000MBPS_ON));
5057         } else {
5058                 tp->link_config.active_speed = SPEED_INVALID;
5059                 tp->link_config.active_duplex = DUPLEX_INVALID;
5060                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5061                                     LED_CTRL_LNKLED_OVERRIDE |
5062                                     LED_CTRL_TRAFFIC_OVERRIDE));
5063         }
5064
5065         if (current_link_up != netif_carrier_ok(tp->dev)) {
5066                 if (current_link_up)
5067                         netif_carrier_on(tp->dev);
5068                 else
5069                         netif_carrier_off(tp->dev);
5070                 tg3_link_report(tp);
5071         } else {
5072                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5073                 if (orig_pause_cfg != now_pause_cfg ||
5074                     orig_active_speed != tp->link_config.active_speed ||
5075                     orig_active_duplex != tp->link_config.active_duplex)
5076                         tg3_link_report(tp);
5077         }
5078
5079         return 0;
5080 }
5081
5082 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5083 {
5084         int current_link_up, err = 0;
5085         u32 bmsr, bmcr;
5086         u16 current_speed;
5087         u8 current_duplex;
5088         u32 local_adv, remote_adv;
5089
5090         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5091         tw32_f(MAC_MODE, tp->mac_mode);
5092         udelay(40);
5093
5094         tw32(MAC_EVENT, 0);
5095
5096         tw32_f(MAC_STATUS,
5097              (MAC_STATUS_SYNC_CHANGED |
5098               MAC_STATUS_CFG_CHANGED |
5099               MAC_STATUS_MI_COMPLETION |
5100               MAC_STATUS_LNKSTATE_CHANGED));
5101         udelay(40);
5102
5103         if (force_reset)
5104                 tg3_phy_reset(tp);
5105
5106         current_link_up = 0;
5107         current_speed = SPEED_INVALID;
5108         current_duplex = DUPLEX_INVALID;
5109         tp->link_config.rmt_adv = 0;
5110
5111         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5112         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5113         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
5114                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5115                         bmsr |= BMSR_LSTATUS;
5116                 else
5117                         bmsr &= ~BMSR_LSTATUS;
5118         }
5119
5120         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5121
5122         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5123             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5124                 /* do nothing, just check for link up at the end */
5125         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5126                 u32 adv, newadv;
5127
5128                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5129                 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5130                                  ADVERTISE_1000XPAUSE |
5131                                  ADVERTISE_1000XPSE_ASYM |
5132                                  ADVERTISE_SLCT);
5133
5134                 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5135                 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5136
5137                 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5138                         tg3_writephy(tp, MII_ADVERTISE, newadv);
5139                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5140                         tg3_writephy(tp, MII_BMCR, bmcr);
5141
5142                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5143                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5144                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5145
5146                         return err;
5147                 }
5148         } else {
5149                 u32 new_bmcr;
5150
5151                 bmcr &= ~BMCR_SPEED1000;
5152                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5153
5154                 if (tp->link_config.duplex == DUPLEX_FULL)
5155                         new_bmcr |= BMCR_FULLDPLX;
5156
5157                 if (new_bmcr != bmcr) {
5158                         /* BMCR_SPEED1000 is a reserved bit that needs
5159                          * to be set on write.
5160                          */
5161                         new_bmcr |= BMCR_SPEED1000;
5162
5163                         /* Force a linkdown */
5164                         if (netif_carrier_ok(tp->dev)) {
5165                                 u32 adv;
5166
5167                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5168                                 adv &= ~(ADVERTISE_1000XFULL |
5169                                          ADVERTISE_1000XHALF |
5170                                          ADVERTISE_SLCT);
5171                                 tg3_writephy(tp, MII_ADVERTISE, adv);
5172                                 tg3_writephy(tp, MII_BMCR, bmcr |
5173                                                            BMCR_ANRESTART |
5174                                                            BMCR_ANENABLE);
5175                                 udelay(10);
5176                                 netif_carrier_off(tp->dev);
5177                         }
5178                         tg3_writephy(tp, MII_BMCR, new_bmcr);
5179                         bmcr = new_bmcr;
5180                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5181                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5182                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
5183                             ASIC_REV_5714) {
5184                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5185                                         bmsr |= BMSR_LSTATUS;
5186                                 else
5187                                         bmsr &= ~BMSR_LSTATUS;
5188                         }
5189                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5190                 }
5191         }
5192
5193         if (bmsr & BMSR_LSTATUS) {
5194                 current_speed = SPEED_1000;
5195                 current_link_up = 1;
5196                 if (bmcr & BMCR_FULLDPLX)
5197                         current_duplex = DUPLEX_FULL;
5198                 else
5199                         current_duplex = DUPLEX_HALF;
5200
5201                 local_adv = 0;
5202                 remote_adv = 0;
5203
5204                 if (bmcr & BMCR_ANENABLE) {
5205                         u32 common;
5206
5207                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5208                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5209                         common = local_adv & remote_adv;
5210                         if (common & (ADVERTISE_1000XHALF |
5211                                       ADVERTISE_1000XFULL)) {
5212                                 if (common & ADVERTISE_1000XFULL)
5213                                         current_duplex = DUPLEX_FULL;
5214                                 else
5215                                         current_duplex = DUPLEX_HALF;
5216
5217                                 tp->link_config.rmt_adv =
5218                                            mii_adv_to_ethtool_adv_x(remote_adv);
5219                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5220                                 /* Link is up via parallel detect */
5221                         } else {
5222                                 current_link_up = 0;
5223                         }
5224                 }
5225         }
5226
5227         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5228                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5229
5230         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5231         if (tp->link_config.active_duplex == DUPLEX_HALF)
5232                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5233
5234         tw32_f(MAC_MODE, tp->mac_mode);
5235         udelay(40);
5236
5237         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5238
5239         tp->link_config.active_speed = current_speed;
5240         tp->link_config.active_duplex = current_duplex;
5241
5242         if (current_link_up != netif_carrier_ok(tp->dev)) {
5243                 if (current_link_up)
5244                         netif_carrier_on(tp->dev);
5245                 else {
5246                         netif_carrier_off(tp->dev);
5247                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5248                 }
5249                 tg3_link_report(tp);
5250         }
5251         return err;
5252 }
5253
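/* Parallel-detection helper, invoked periodically (note the
 * serdes_counter countdown): with signal detect but no config code
 * words, force the link up; once config words reappear, hand the
 * link back to autoneg.
 */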
5254 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5255 {
5256         if (tp->serdes_counter) {
5257                 /* Give autoneg time to complete. */
5258                 tp->serdes_counter--;
5259                 return;
5260         }
5261
5262         if (!netif_carrier_ok(tp->dev) &&
5263             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5264                 u32 bmcr;
5265
5266                 tg3_readphy(tp, MII_BMCR, &bmcr);
5267                 if (bmcr & BMCR_ANENABLE) {
5268                         u32 phy1, phy2;
5269
5270                         /* Select shadow register 0x1f */
5271                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5272                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5273
5274                         /* Select expansion interrupt status register */
5275                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5276                                          MII_TG3_DSP_EXP1_INT_STAT);
5277                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5278                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5279
5280                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5281                                 /* We have signal detect and not receiving
5282                                  * config code words, link is up by parallel
5283                                  * detection.
5284                                  */
5285
5286                                 bmcr &= ~BMCR_ANENABLE;
5287                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5288                                 tg3_writephy(tp, MII_BMCR, bmcr);
5289                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5290                         }
5291                 }
5292         } else if (netif_carrier_ok(tp->dev) &&
5293                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5294                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5295                 u32 phy2;
5296
5297                 /* Select expansion interrupt status register */
5298                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5299                                  MII_TG3_DSP_EXP1_INT_STAT);
5300                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5301                 if (phy2 & 0x20) {
5302                         u32 bmcr;
5303
5304                         /* Config code words received, turn on autoneg. */
5305                         tg3_readphy(tp, MII_BMCR, &bmcr);
5306                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5307
5308                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5309
5310                 }
5311         }
5312 }
5313
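/* Top-level link setup: dispatch on PHY type, then adjust the MAC
 * clock prescaler (5784 AX only), transmit IPG/slot time, statistics
 * coalescing, and the ASPM L1 threshold workaround to match the new
 * link state.
 */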
5314 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5315 {
5316         u32 val;
5317         int err;
5318
5319         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5320                 err = tg3_setup_fiber_phy(tp, force_reset);
5321         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5322                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5323         else
5324                 err = tg3_setup_copper_phy(tp, force_reset);
5325
5326         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
5327                 u32 scale;
5328
5329                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5330                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5331                         scale = 65;
5332                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5333                         scale = 6;
5334                 else
5335                         scale = 12;
5336
5337                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5338                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5339                 tw32(GRC_MISC_CFG, val);
5340         }
5341
5342         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5343               (6 << TX_LENGTHS_IPG_SHIFT);
5344         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
5345                 val |= tr32(MAC_TX_LENGTHS) &
5346                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
5347                         TX_LENGTHS_CNT_DWN_VAL_MSK);
5348
5349         if (tp->link_config.active_speed == SPEED_1000 &&
5350             tp->link_config.active_duplex == DUPLEX_HALF)
5351                 tw32(MAC_TX_LENGTHS, val |
5352                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5353         else
5354                 tw32(MAC_TX_LENGTHS, val |
5355                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5356
5357         if (!tg3_flag(tp, 5705_PLUS)) {
5358                 if (netif_carrier_ok(tp->dev)) {
5359                         tw32(HOSTCC_STAT_COAL_TICKS,
5360                              tp->coal.stats_block_coalesce_usecs);
5361                 } else {
5362                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
5363                 }
5364         }
5365
5366         if (tg3_flag(tp, ASPM_WORKAROUND)) {
5367                 val = tr32(PCIE_PWR_MGMT_THRESH);
5368                 if (!netif_carrier_ok(tp->dev))
5369                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5370                               tp->pwrmgmt_thresh;
5371                 else
5372                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5373                 tw32(PCIE_PWR_MGMT_THRESH, val);
5374         }
5375
5376         return err;
5377 }
5378
5379 static inline int tg3_irq_sync(struct tg3 *tp)
5380 {
5381         return tp->irq_sync;
5382 }
5383
5384 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5385 {
5386         int i;
5387
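        /* Offset the destination by the register offset so that the
         * dump in tg3_dump_state() prints each word at its register
         * address.
         */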
5388         dst = (u32 *)((u8 *)dst + off);
5389         for (i = 0; i < len; i += sizeof(u32))
5390                 *dst++ = tr32(off + i);
5391 }
5392
5393 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5394 {
5395         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5396         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5397         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5398         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5399         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5400         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5401         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5402         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5403         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5404         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5405         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5406         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5407         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5408         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5409         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5410         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5411         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5412         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5413         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5414
5415         if (tg3_flag(tp, SUPPORT_MSIX))
5416                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5417
5418         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5419         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5420         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5421         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5422         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5423         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5424         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5425         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5426
5427         if (!tg3_flag(tp, 5705_PLUS)) {
5428                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5429                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5430                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5431         }
5432
5433         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5434         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5435         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5436         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5437         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5438
5439         if (tg3_flag(tp, NVRAM))
5440                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5441 }
5442
5443 static void tg3_dump_state(struct tg3 *tp)
5444 {
5445         int i;
5446         u32 *regs;
5447
5448         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5449         if (!regs) {
5450                 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
5451                 return;
5452         }
5453
5454         if (tg3_flag(tp, PCI_EXPRESS)) {
5455                 /* Read up to but not including private PCI registers */
5456                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5457                         regs[i / sizeof(u32)] = tr32(i);
5458         } else
5459                 tg3_dump_legacy_regs(tp, regs);
5460
5461         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5462                 if (!regs[i + 0] && !regs[i + 1] &&
5463                     !regs[i + 2] && !regs[i + 3])
5464                         continue;
5465
5466                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5467                            i * 4,
5468                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5469         }
5470
5471         kfree(regs);
5472
5473         for (i = 0; i < tp->irq_cnt; i++) {
5474                 struct tg3_napi *tnapi = &tp->napi[i];
5475
5476                 /* SW status block */
5477                 netdev_err(tp->dev,
5478                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5479                            i,
5480                            tnapi->hw_status->status,
5481                            tnapi->hw_status->status_tag,
5482                            tnapi->hw_status->rx_jumbo_consumer,
5483                            tnapi->hw_status->rx_consumer,
5484                            tnapi->hw_status->rx_mini_consumer,
5485                            tnapi->hw_status->idx[0].rx_producer,
5486                            tnapi->hw_status->idx[0].tx_consumer);
5487
5488                 netdev_err(tp->dev,
5489                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5490                            i,
5491                            tnapi->last_tag, tnapi->last_irq_tag,
5492                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5493                            tnapi->rx_rcb_ptr,
5494                            tnapi->prodring.rx_std_prod_idx,
5495                            tnapi->prodring.rx_std_cons_idx,
5496                            tnapi->prodring.rx_jmb_prod_idx,
5497                            tnapi->prodring.rx_jmb_cons_idx);
5498         }
5499 }
5500
5501 /* This is called whenever we suspect that the system chipset is re-
5502  * ordering the sequence of MMIO to the tx send mailbox. The symptom
5503  * is bogus tx completions. We try to recover by setting the
5504  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5505  * in the workqueue.
5506  */
5507 static void tg3_tx_recover(struct tg3 *tp)
5508 {
5509         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5510                tp->write32_tx_mbox == tg3_write_indirect_mbox);
5511
5512         netdev_warn(tp->dev,
5513                     "The system may be re-ordering memory-mapped I/O "
5514                     "cycles to the network device, attempting to recover. "
5515                     "Please report the problem to the driver maintainer "
5516                     "and include system chipset information.\n");
5517
5518         spin_lock(&tp->lock);
5519         tg3_flag_set(tp, TX_RECOVERY_PENDING);
5520         spin_unlock(&tp->lock);
5521 }
5522
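/* Available tx descriptors, computed with wrap-safe modular
 * arithmetic.  For example, with a 512-entry ring, tx_prod = 5 and
 * tx_cons = 510 give (5 - 510) & 511 = 7 descriptors in flight,
 * leaving tx_pending - 7 available.
 */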
5523 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5524 {
5525         /* Tell compiler to fetch tx indices from memory. */
5526         barrier();
5527         return tnapi->tx_pending -
5528                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5529 }
5530
5531 /* Tigon3 never reports partial packet sends.  So we do not
5532  * need special logic to handle SKBs that have not had all
5533  * of their frags sent yet, like SunGEM does.
5534  */
5535 static void tg3_tx(struct tg3_napi *tnapi)
5536 {
5537         struct tg3 *tp = tnapi->tp;
5538         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5539         u32 sw_idx = tnapi->tx_cons;
5540         struct netdev_queue *txq;
5541         int index = tnapi - tp->napi;
5542         unsigned int pkts_compl = 0, bytes_compl = 0;
5543
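        /* With TSS, napi[0] carries no tx ring, so tx vector i maps
         * to netdev queue i - 1.
         */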
5544         if (tg3_flag(tp, ENABLE_TSS))
5545                 index--;
5546
5547         txq = netdev_get_tx_queue(tp->dev, index);
5548
5549         while (sw_idx != hw_idx) {
5550                 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5551                 struct sk_buff *skb = ri->skb;
5552                 int i, tx_bug = 0;
5553
5554                 if (unlikely(skb == NULL)) {
5555                         tg3_tx_recover(tp);
5556                         return;
5557                 }
5558
5559                 pci_unmap_single(tp->pdev,
5560                                  dma_unmap_addr(ri, mapping),
5561                                  skb_headlen(skb),
5562                                  PCI_DMA_TODEVICE);
5563
5564                 ri->skb = NULL;
5565
5566                 while (ri->fragmented) {
5567                         ri->fragmented = false;
5568                         sw_idx = NEXT_TX(sw_idx);
5569                         ri = &tnapi->tx_buffers[sw_idx];
5570                 }
5571
5572                 sw_idx = NEXT_TX(sw_idx);
5573
5574                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5575                         ri = &tnapi->tx_buffers[sw_idx];
5576                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5577                                 tx_bug = 1;
5578
5579                         pci_unmap_page(tp->pdev,
5580                                        dma_unmap_addr(ri, mapping),
5581                                        skb_frag_size(&skb_shinfo(skb)->frags[i]),
5582                                        PCI_DMA_TODEVICE);
5583
5584                         while (ri->fragmented) {
5585                                 ri->fragmented = false;
5586                                 sw_idx = NEXT_TX(sw_idx);
5587                                 ri = &tnapi->tx_buffers[sw_idx];
5588                         }
5589
5590                         sw_idx = NEXT_TX(sw_idx);
5591                 }
5592
5593                 pkts_compl++;
5594                 bytes_compl += skb->len;
5595
5596                 dev_kfree_skb(skb);
5597
5598                 if (unlikely(tx_bug)) {
5599                         tg3_tx_recover(tp);
5600                         return;
5601                 }
5602         }
5603
5604         netdev_completed_queue(tp->dev, pkts_compl, bytes_compl);
5605
5606         tnapi->tx_cons = sw_idx;
5607
5608         /* Need to make the tx_cons update visible to tg3_start_xmit()
5609          * before checking for netif_queue_stopped().  Without the
5610          * memory barrier, there is a small possibility that tg3_start_xmit()
5611          * will miss it and cause the queue to be stopped forever.
5612          */
5613         smp_mb();
5614
5615         if (unlikely(netif_tx_queue_stopped(txq) &&
5616                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
5617                 __netif_tx_lock(txq, smp_processor_id());
5618                 if (netif_tx_queue_stopped(txq) &&
5619                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
5620                         netif_tx_wake_queue(txq);
5621                 __netif_tx_unlock(txq);
5622         }
5623 }
5624
5625 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5626 {
5627         if (!ri->data)
5628                 return;
5629
5630         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5631                          map_sz, PCI_DMA_FROMDEVICE);
5632         kfree(ri->data);
5633         ri->data = NULL;
5634 }
5635
5636 /* Returns the size of the rx data buffer allocated, or < 0 on error.
5637  *
5638  * We only need to fill in the address because the other members
5639  * of the RX descriptor are invariant, see tg3_init_rings.
5640  *
5641  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
5642  * posting buffers we only dirty the first cache line of the RX
5643  * descriptor (containing the address).  Whereas for the RX status
5644  * buffers the cpu only reads the last cache line of the RX descriptor
5645  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
5646  */
5647 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
5648                             u32 opaque_key, u32 dest_idx_unmasked)
5649 {
5650         struct tg3_rx_buffer_desc *desc;
5651         struct ring_info *map;
5652         u8 *data;
5653         dma_addr_t mapping;
5654         int skb_size, data_size, dest_idx;
5655
5656         switch (opaque_key) {
5657         case RXD_OPAQUE_RING_STD:
5658                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5659                 desc = &tpr->rx_std[dest_idx];
5660                 map = &tpr->rx_std_buffers[dest_idx];
5661                 data_size = tp->rx_pkt_map_sz;
5662                 break;
5663
5664         case RXD_OPAQUE_RING_JUMBO:
5665                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5666                 desc = &tpr->rx_jmb[dest_idx].std;
5667                 map = &tpr->rx_jmb_buffers[dest_idx];
5668                 data_size = TG3_RX_JMB_MAP_SZ;
5669                 break;
5670
5671         default:
5672                 return -EINVAL;
5673         }
5674
5675         /* Do not overwrite any of the map or rp information
5676          * until we are sure we can commit to a new buffer.
5677          *
5678          * Callers depend upon this behavior and assume that
5679          * we leave everything unchanged if we fail.
5680          */
5681         skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
5682                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5683         data = kmalloc(skb_size, GFP_ATOMIC);
5684         if (!data)
5685                 return -ENOMEM;
5686
5687         mapping = pci_map_single(tp->pdev,
5688                                  data + TG3_RX_OFFSET(tp),
5689                                  data_size,
5690                                  PCI_DMA_FROMDEVICE);
5691         if (pci_dma_mapping_error(tp->pdev, mapping)) {
5692                 kfree(data);
5693                 return -EIO;
5694         }
5695
5696         map->data = data;
5697         dma_unmap_addr_set(map, mapping, mapping);
5698
5699         desc->addr_hi = ((u64)mapping >> 32);
5700         desc->addr_lo = ((u64)mapping & 0xffffffff);
5701
5702         return data_size;
5703 }
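/* Illustrative arithmetic for the skb_size computation above: the data area
 * (including the rx offset) and the skb_shared_info footer are each rounded
 * up by SKB_DATA_ALIGN.  A stand-alone sketch, assuming a 64-byte cache line
 * and a 320-byte shared-info footer (both values illustrative, not the
 * kernel's actual constants).
 */
#include <assert.h>
#include <stddef.h>

#define TOY_CACHE_BYTES	64
#define TOY_ALIGN(x)	(((x) + TOY_CACHE_BYTES - 1) & ~(size_t)(TOY_CACHE_BYTES - 1))
#define TOY_SHINFO_SZ	320	/* stand-in for sizeof(struct skb_shared_info) */

int main(void)
{
	size_t data_size = 1536;	/* e.g. tp->rx_pkt_map_sz */
	size_t rx_offset = 66;		/* e.g. TG3_RX_OFFSET(tp) */
	size_t skb_size = TOY_ALIGN(data_size + rx_offset) +
			  TOY_ALIGN(TOY_SHINFO_SZ);

	/* 1536 + 66 = 1602 rounds up to 1664; 320 is already aligned */
	assert(skb_size == 1664 + 320);
	return 0;
}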
5704
5705 /* We only need to move over in the address because the other
5706  * members of the RX descriptor are invariant.  See notes above
5707  * tg3_alloc_rx_data for full details.
5708  */
5709 static void tg3_recycle_rx(struct tg3_napi *tnapi,
5710                            struct tg3_rx_prodring_set *dpr,
5711                            u32 opaque_key, int src_idx,
5712                            u32 dest_idx_unmasked)
5713 {
5714         struct tg3 *tp = tnapi->tp;
5715         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
5716         struct ring_info *src_map, *dest_map;
5717         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
5718         int dest_idx;
5719
5720         switch (opaque_key) {
5721         case RXD_OPAQUE_RING_STD:
5722                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5723                 dest_desc = &dpr->rx_std[dest_idx];
5724                 dest_map = &dpr->rx_std_buffers[dest_idx];
5725                 src_desc = &spr->rx_std[src_idx];
5726                 src_map = &spr->rx_std_buffers[src_idx];
5727                 break;
5728
5729         case RXD_OPAQUE_RING_JUMBO:
5730                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5731                 dest_desc = &dpr->rx_jmb[dest_idx].std;
5732                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
5733                 src_desc = &spr->rx_jmb[src_idx].std;
5734                 src_map = &spr->rx_jmb_buffers[src_idx];
5735                 break;
5736
5737         default:
5738                 return;
5739         }
5740
5741         dest_map->data = src_map->data;
5742         dma_unmap_addr_set(dest_map, mapping,
5743                            dma_unmap_addr(src_map, mapping));
5744         dest_desc->addr_hi = src_desc->addr_hi;
5745         dest_desc->addr_lo = src_desc->addr_lo;
5746
5747         /* Ensure that the update to the data pointer happens after the
5748          * physical addresses have been transferred to the new BD location.
5749          */
5750         smp_wmb();
5751
5752         src_map->data = NULL;
5753 }
5754
5755 /* The RX ring scheme is composed of multiple rings which post fresh
5756  * buffers to the chip, and one special ring the chip uses to report
5757  * status back to the host.
5758  *
5759  * The special ring reports the status of received packets to the
5760  * host.  The chip does not write into the original descriptor the
5761  * RX buffer was obtained from.  The chip simply takes the original
5762  * descriptor as provided by the host, updates the status and length
5763  * field, then writes this into the next status ring entry.
5764  *
5765  * Each ring the host uses to post buffers to the chip is described
5766  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
5767  * it is first placed into the on-chip ram.  When the packet's length
5768  * is known, it walks down the TG3_BDINFO entries to select the ring.
5769  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
5770  * whose MAXLEN covers the new packet's length is chosen.
5771  *
5772  * The "separate ring for rx status" scheme may sound queer, but it makes
5773  * sense from a cache coherency perspective.  If only the host writes
5774  * to the buffer post rings, and only the chip writes to the rx status
5775  * rings, then cache lines never move beyond shared-modified state.
5776  * If both the host and chip were to write into the same ring, cache line
5777  * eviction could occur since both entities want it in an exclusive state.
5778  */
5779 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5780 {
5781         struct tg3 *tp = tnapi->tp;
5782         u32 work_mask, rx_std_posted = 0;
5783         u32 std_prod_idx, jmb_prod_idx;
5784         u32 sw_idx = tnapi->rx_rcb_ptr;
5785         u16 hw_idx;
5786         int received;
5787         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5788
5789         hw_idx = *(tnapi->rx_rcb_prod_idx);
5790         /*
5791          * We need to order the read of hw_idx and the read of
5792          * the opaque cookie.
5793          */
5794         rmb();
5795         work_mask = 0;
5796         received = 0;
5797         std_prod_idx = tpr->rx_std_prod_idx;
5798         jmb_prod_idx = tpr->rx_jmb_prod_idx;
5799         while (sw_idx != hw_idx && budget > 0) {
5800                 struct ring_info *ri;
5801                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5802                 unsigned int len;
5803                 struct sk_buff *skb;
5804                 dma_addr_t dma_addr;
5805                 u32 opaque_key, desc_idx, *post_ptr;
5806                 u8 *data;
5807
5808                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5809                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5810                 if (opaque_key == RXD_OPAQUE_RING_STD) {
5811                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5812                         dma_addr = dma_unmap_addr(ri, mapping);
5813                         data = ri->data;
5814                         post_ptr = &std_prod_idx;
5815                         rx_std_posted++;
5816                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5817                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5818                         dma_addr = dma_unmap_addr(ri, mapping);
5819                         data = ri->data;
5820                         post_ptr = &jmb_prod_idx;
5821                 } else
5822                         goto next_pkt_nopost;
5823
5824                 work_mask |= opaque_key;
5825
5826                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5827                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5828                 drop_it:
5829                         tg3_recycle_rx(tnapi, tpr, opaque_key,
5830                                        desc_idx, *post_ptr);
5831                 drop_it_no_recycle:
5832                         /* Other statistics are kept by the card. */
5833                         tp->rx_dropped++;
5834                         goto next_pkt;
5835                 }
5836
5837                 prefetch(data + TG3_RX_OFFSET(tp));
5838                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5839                       ETH_FCS_LEN;
5840
5841                 if (len > TG3_RX_COPY_THRESH(tp)) {
5842                         int skb_size;
5843
5844                         skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
5845                                                     *post_ptr);
5846                         if (skb_size < 0)
5847                                 goto drop_it;
5848
5849                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
5850                                          PCI_DMA_FROMDEVICE);
5851
5852                         skb = build_skb(data);
5853                         if (!skb) {
5854                                 kfree(data);
5855                                 goto drop_it_no_recycle;
5856                         }
5857                         skb_reserve(skb, TG3_RX_OFFSET(tp));
5858                         /* Ensure that the update to the data happens
5859                          * after the usage of the old DMA mapping.
5860                          */
5861                         smp_wmb();
5862
5863                         ri->data = NULL;
5864
5865                 } else {
5866                         tg3_recycle_rx(tnapi, tpr, opaque_key,
5867                                        desc_idx, *post_ptr);
5868
5869                         skb = netdev_alloc_skb(tp->dev,
5870                                                len + TG3_RAW_IP_ALIGN);
5871                         if (skb == NULL)
5872                                 goto drop_it_no_recycle;
5873
5874                         skb_reserve(skb, TG3_RAW_IP_ALIGN);
5875                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5876                         memcpy(skb->data,
5877                                data + TG3_RX_OFFSET(tp),
5878                                len);
5879                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5880                 }
5881
5882                 skb_put(skb, len);
5883                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
5884                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
5885                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
5886                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
5887                         skb->ip_summed = CHECKSUM_UNNECESSARY;
5888                 else
5889                         skb_checksum_none_assert(skb);
5890
5891                 skb->protocol = eth_type_trans(skb, tp->dev);
5892
5893                 if (len > (tp->dev->mtu + ETH_HLEN) &&
5894                     skb->protocol != htons(ETH_P_8021Q)) {
5895                         dev_kfree_skb(skb);
5896                         goto drop_it_no_recycle;
5897                 }
5898
5899                 if (desc->type_flags & RXD_FLAG_VLAN &&
5900                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
5901                         __vlan_hwaccel_put_tag(skb,
5902                                                desc->err_vlan & RXD_VLAN_MASK);
5903
5904                 napi_gro_receive(&tnapi->napi, skb);
5905
5906                 received++;
5907                 budget--;
5908
5909 next_pkt:
5910                 (*post_ptr)++;
5911
5912                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5913                         tpr->rx_std_prod_idx = std_prod_idx &
5914                                                tp->rx_std_ring_mask;
5915                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5916                                      tpr->rx_std_prod_idx);
5917                         work_mask &= ~RXD_OPAQUE_RING_STD;
5918                         rx_std_posted = 0;
5919                 }
5920 next_pkt_nopost:
5921                 sw_idx++;
5922                 sw_idx &= tp->rx_ret_ring_mask;
5923
5924                 /* Refresh hw_idx to see if there is new work */
5925                 if (sw_idx == hw_idx) {
5926                         hw_idx = *(tnapi->rx_rcb_prod_idx);
5927                         rmb();
5928                 }
5929         }
5930
5931         /* ACK the status ring. */
5932         tnapi->rx_rcb_ptr = sw_idx;
5933         tw32_rx_mbox(tnapi->consmbox, sw_idx);
5934
5935         /* Refill RX ring(s). */
5936         if (!tg3_flag(tp, ENABLE_RSS)) {
5937                 if (work_mask & RXD_OPAQUE_RING_STD) {
5938                         tpr->rx_std_prod_idx = std_prod_idx &
5939                                                tp->rx_std_ring_mask;
5940                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5941                                      tpr->rx_std_prod_idx);
5942                 }
5943                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5944                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
5945                                                tp->rx_jmb_ring_mask;
5946                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5947                                      tpr->rx_jmb_prod_idx);
5948                 }
5949                 mmiowb();
5950         } else if (work_mask) {
5951                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5952                  * updated before the producer indices can be updated.
5953                  */
5954                 smp_wmb();
5955
5956                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5957                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5958
5959                 if (tnapi != &tp->napi[1])
5960                         napi_schedule(&tp->napi[1].napi);
5961         }
5962
5963         return received;
5964 }
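/* The index arithmetic in tg3_rx() relies on the rings being power-of-two
 * sized, so wrapping is a mask rather than a modulo.  A stand-alone check of
 * the increment used at next_pkt_nopost (the 512-entry size is illustrative):
 */
#include <assert.h>

int main(void)
{
	unsigned int mask = 512 - 1;	/* e.g. tp->rx_ret_ring_mask */
	unsigned int sw_idx = 511;

	sw_idx = (sw_idx + 1) & mask;	/* wraps from 511 back to 0 */
	assert(sw_idx == 0);
	return 0;
}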
5965
5966 static void tg3_poll_link(struct tg3 *tp)
5967 {
5968         /* handle link change and other phy events */
5969         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5970                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5971
5972                 if (sblk->status & SD_STATUS_LINK_CHG) {
5973                         sblk->status = SD_STATUS_UPDATED |
5974                                        (sblk->status & ~SD_STATUS_LINK_CHG);
5975                         spin_lock(&tp->lock);
5976                         if (tg3_flag(tp, USE_PHYLIB)) {
5977                                 tw32_f(MAC_STATUS,
5978                                      (MAC_STATUS_SYNC_CHANGED |
5979                                       MAC_STATUS_CFG_CHANGED |
5980                                       MAC_STATUS_MI_COMPLETION |
5981                                       MAC_STATUS_LNKSTATE_CHANGED));
5982                                 udelay(40);
5983                         } else
5984                                 tg3_setup_phy(tp, 0);
5985                         spin_unlock(&tp->lock);
5986                 }
5987         }
5988 }
5989
5990 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5991                                 struct tg3_rx_prodring_set *dpr,
5992                                 struct tg3_rx_prodring_set *spr)
5993 {
5994         u32 si, di, cpycnt, src_prod_idx;
5995         int i, err = 0;
5996
5997         while (1) {
5998                 src_prod_idx = spr->rx_std_prod_idx;
5999
6000                 /* Make sure updates to the rx_std_buffers[] entries and the
6001                  * standard producer index are seen in the correct order.
6002                  */
6003                 smp_rmb();
6004
6005                 if (spr->rx_std_cons_idx == src_prod_idx)
6006                         break;
6007
6008                 if (spr->rx_std_cons_idx < src_prod_idx)
6009                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6010                 else
6011                         cpycnt = tp->rx_std_ring_mask + 1 -
6012                                  spr->rx_std_cons_idx;
6013
6014                 cpycnt = min(cpycnt,
6015                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6016
6017                 si = spr->rx_std_cons_idx;
6018                 di = dpr->rx_std_prod_idx;
6019
6020                 for (i = di; i < di + cpycnt; i++) {
6021                         if (dpr->rx_std_buffers[i].data) {
6022                                 cpycnt = i - di;
6023                                 err = -ENOSPC;
6024                                 break;
6025                         }
6026                 }
6027
6028                 if (!cpycnt)
6029                         break;
6030
6031                 /* Ensure that updates to the rx_std_buffers ring and the
6032                  * shadowed hardware producer ring from tg3_recycle_rx() are
6033                  * ordered correctly WRT the data check above.
6034                  */
6035                 smp_rmb();
6036
6037                 memcpy(&dpr->rx_std_buffers[di],
6038                        &spr->rx_std_buffers[si],
6039                        cpycnt * sizeof(struct ring_info));
6040
6041                 for (i = 0; i < cpycnt; i++, di++, si++) {
6042                         struct tg3_rx_buffer_desc *sbd, *dbd;
6043                         sbd = &spr->rx_std[si];
6044                         dbd = &dpr->rx_std[di];
6045                         dbd->addr_hi = sbd->addr_hi;
6046                         dbd->addr_lo = sbd->addr_lo;
6047                 }
6048
6049                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6050                                        tp->rx_std_ring_mask;
6051                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6052                                        tp->rx_std_ring_mask;
6053         }
6054
6055         while (1) {
6056                 src_prod_idx = spr->rx_jmb_prod_idx;
6057
6058                 /* Make sure updates to the rx_jmb_buffers[] entries and
6059                  * the jumbo producer index are seen in the correct order.
6060                  */
6061                 smp_rmb();
6062
6063                 if (spr->rx_jmb_cons_idx == src_prod_idx)
6064                         break;
6065
6066                 if (spr->rx_jmb_cons_idx < src_prod_idx)
6067                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6068                 else
6069                         cpycnt = tp->rx_jmb_ring_mask + 1 -
6070                                  spr->rx_jmb_cons_idx;
6071
6072                 cpycnt = min(cpycnt,
6073                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6074
6075                 si = spr->rx_jmb_cons_idx;
6076                 di = dpr->rx_jmb_prod_idx;
6077
6078                 for (i = di; i < di + cpycnt; i++) {
6079                         if (dpr->rx_jmb_buffers[i].data) {
6080                                 cpycnt = i - di;
6081                                 err = -ENOSPC;
6082                                 break;
6083                         }
6084                 }
6085
6086                 if (!cpycnt)
6087                         break;
6088
6089                 /* Ensure that updates to the rx_jmb_buffers ring and the
6090                  * shadowed hardware producer ring from tg3_recycle_rx() are
6091                  * ordered correctly WRT the data check above.
6092                  */
6093                 smp_rmb();
6094
6095                 memcpy(&dpr->rx_jmb_buffers[di],
6096                        &spr->rx_jmb_buffers[si],
6097                        cpycnt * sizeof(struct ring_info));
6098
6099                 for (i = 0; i < cpycnt; i++, di++, si++) {
6100                         struct tg3_rx_buffer_desc *sbd, *dbd;
6101                         sbd = &spr->rx_jmb[si].std;
6102                         dbd = &dpr->rx_jmb[di].std;
6103                         dbd->addr_hi = sbd->addr_hi;
6104                         dbd->addr_lo = sbd->addr_lo;
6105                 }
6106
6107                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6108                                        tp->rx_jmb_ring_mask;
6109                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6110                                        tp->rx_jmb_ring_mask;
6111         }
6112
6113         return err;
6114 }
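/* The cpycnt calculation above copies at most up to the source ring's wrap
 * point and at most up to the destination's wrap point; the enclosing
 * while (1) loop then picks up the remainder on its next pass.  A stand-alone
 * sketch of the same arithmetic (512-entry ring, toy_* names illustrative):
 */
#include <assert.h>

static unsigned int toy_cpycnt(unsigned int cons, unsigned int prod,
			       unsigned int dprod, unsigned int mask)
{
	unsigned int n;

	if (cons < prod)
		n = prod - cons;		/* contiguous run */
	else
		n = mask + 1 - cons;		/* run up to the wrap point */

	if (n > mask + 1 - dprod)		/* destination wrap limit */
		n = mask + 1 - dprod;
	return n;
}

int main(void)
{
	/* consumer at 510, producer wrapped to 2: the first pass copies
	 * entries 510..511, the second pass copies entries 0..1 */
	assert(toy_cpycnt(510, 2, 0, 511) == 2);
	assert(toy_cpycnt(0, 2, 2, 511) == 2);
	return 0;
}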
6115
6116 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6117 {
6118         struct tg3 *tp = tnapi->tp;
6119
6120         /* run TX completion thread */
6121         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6122                 tg3_tx(tnapi);
6123                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6124                         return work_done;
6125         }
6126
6127         /* run RX thread, within the bounds set by NAPI.
6128          * All RX "locking" is done by ensuring outside
6129          * code synchronizes with tg3->napi.poll()
6130          */
6131         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6132                 work_done += tg3_rx(tnapi, budget - work_done);
6133
6134         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6135                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
6136                 int i, err = 0;
6137                 u32 std_prod_idx = dpr->rx_std_prod_idx;
6138                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6139
6140                 for (i = 1; i < tp->irq_cnt; i++)
6141                         err |= tg3_rx_prodring_xfer(tp, dpr,
6142                                                     &tp->napi[i].prodring);
6143
6144                 wmb();
6145
6146                 if (std_prod_idx != dpr->rx_std_prod_idx)
6147                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6148                                      dpr->rx_std_prod_idx);
6149
6150                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
6151                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6152                                      dpr->rx_jmb_prod_idx);
6153
6154                 mmiowb();
6155
6156                 if (err)
6157                         tw32_f(HOSTCC_MODE, tp->coal_now);
6158         }
6159
6160         return work_done;
6161 }
6162
6163 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6164 {
6165         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6166                 schedule_work(&tp->reset_task);
6167 }
6168
6169 static inline void tg3_reset_task_cancel(struct tg3 *tp)
6170 {
6171         cancel_work_sync(&tp->reset_task);
6172         tg3_flag_clear(tp, RESET_TASK_PENDING);
6173 }
6174
6175 static int tg3_poll_msix(struct napi_struct *napi, int budget)
6176 {
6177         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6178         struct tg3 *tp = tnapi->tp;
6179         int work_done = 0;
6180         struct tg3_hw_status *sblk = tnapi->hw_status;
6181
6182         while (1) {
6183                 work_done = tg3_poll_work(tnapi, work_done, budget);
6184
6185                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6186                         goto tx_recovery;
6187
6188                 if (unlikely(work_done >= budget))
6189                         break;
6190
6191                 /* tnapi->last_tag is used in the interrupt re-enable
6192                  * below to tell the hw how much work has been processed,
6193                  * so we must read it before checking for more work.
6194                  */
6195                 tnapi->last_tag = sblk->status_tag;
6196                 tnapi->last_irq_tag = tnapi->last_tag;
6197                 rmb();
6198
6199                 /* check for RX/TX work to do */
6200                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
6201                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
6202                         napi_complete(napi);
6203                         /* Reenable interrupts. */
6204                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
6205                         mmiowb();
6206                         break;
6207                 }
6208         }
6209
6210         return work_done;
6211
6212 tx_recovery:
6213         /* work_done is guaranteed to be less than budget. */
6214         napi_complete(napi);
6215         tg3_reset_task_schedule(tp);
6216         return work_done;
6217 }
6218
6219 static void tg3_process_error(struct tg3 *tp)
6220 {
6221         u32 val;
6222         bool real_error = false;
6223
6224         if (tg3_flag(tp, ERROR_PROCESSED))
6225                 return;
6226
6227         /* Check Flow Attention register */
6228         val = tr32(HOSTCC_FLOW_ATTN);
6229         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6230                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
6231                 real_error = true;
6232         }
6233
6234         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6235                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
6236                 real_error = true;
6237         }
6238
6239         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6240                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
6241                 real_error = true;
6242         }
6243
6244         if (!real_error)
6245                 return;
6246
6247         tg3_dump_state(tp);
6248
6249         tg3_flag_set(tp, ERROR_PROCESSED);
6250         tg3_reset_task_schedule(tp);
6251 }
6252
6253 static int tg3_poll(struct napi_struct *napi, int budget)
6254 {
6255         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6256         struct tg3 *tp = tnapi->tp;
6257         int work_done = 0;
6258         struct tg3_hw_status *sblk = tnapi->hw_status;
6259
6260         while (1) {
6261                 if (sblk->status & SD_STATUS_ERROR)
6262                         tg3_process_error(tp);
6263
6264                 tg3_poll_link(tp);
6265
6266                 work_done = tg3_poll_work(tnapi, work_done, budget);
6267
6268                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6269                         goto tx_recovery;
6270
6271                 if (unlikely(work_done >= budget))
6272                         break;
6273
6274                 if (tg3_flag(tp, TAGGED_STATUS)) {
6275                         /* tnapi->last_tag is used in tg3_int_reenable() below
6276                          * to tell the hw how much work has been processed,
6277                          * so we must read it before checking for more work.
6278                          */
6279                         tnapi->last_tag = sblk->status_tag;
6280                         tnapi->last_irq_tag = tnapi->last_tag;
6281                         rmb();
6282                 } else
6283                         sblk->status &= ~SD_STATUS_UPDATED;
6284
6285                 if (likely(!tg3_has_work(tnapi))) {
6286                         napi_complete(napi);
6287                         tg3_int_reenable(tnapi);
6288                         break;
6289                 }
6290         }
6291
6292         return work_done;
6293
6294 tx_recovery:
6295         /* work_done is guaranteed to be less than budget. */
6296         napi_complete(napi);
6297         tg3_reset_task_schedule(tp);
6298         return work_done;
6299 }
6300
6301 static void tg3_napi_disable(struct tg3 *tp)
6302 {
6303         int i;
6304
6305         for (i = tp->irq_cnt - 1; i >= 0; i--)
6306                 napi_disable(&tp->napi[i].napi);
6307 }
6308
6309 static void tg3_napi_enable(struct tg3 *tp)
6310 {
6311         int i;
6312
6313         for (i = 0; i < tp->irq_cnt; i++)
6314                 napi_enable(&tp->napi[i].napi);
6315 }
6316
6317 static void tg3_napi_init(struct tg3 *tp)
6318 {
6319         int i;
6320
6321         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6322         for (i = 1; i < tp->irq_cnt; i++)
6323                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6324 }
6325
6326 static void tg3_napi_fini(struct tg3 *tp)
6327 {
6328         int i;
6329
6330         for (i = 0; i < tp->irq_cnt; i++)
6331                 netif_napi_del(&tp->napi[i].napi);
6332 }
6333
6334 static inline void tg3_netif_stop(struct tg3 *tp)
6335 {
6336         tp->dev->trans_start = jiffies; /* prevent tx timeout */
6337         tg3_napi_disable(tp);
6338         netif_tx_disable(tp->dev);
6339 }
6340
6341 static inline void tg3_netif_start(struct tg3 *tp)
6342 {
6343         /* NOTE: unconditional netif_tx_wake_all_queues is only
6344          * appropriate so long as all callers are assured to
6345          * have free tx slots (such as after tg3_init_hw)
6346          */
6347         netif_tx_wake_all_queues(tp->dev);
6348
6349         tg3_napi_enable(tp);
6350         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6351         tg3_enable_ints(tp);
6352 }
6353
6354 static void tg3_irq_quiesce(struct tg3 *tp)
6355 {
6356         int i;
6357
6358         BUG_ON(tp->irq_sync);
6359
6360         tp->irq_sync = 1;
6361         smp_mb();
6362
6363         for (i = 0; i < tp->irq_cnt; i++)
6364                 synchronize_irq(tp->napi[i].irq_vec);
6365 }
6366
6367 /* Fully shut down all tg3 driver activity elsewhere in the system.
6368  * If irq_sync is non-zero, then the IRQ handlers must be synchronized
6369  * as well.  Most of the time this is not necessary, except when
6370  * shutting down the device.
6371  */
6372 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6373 {
6374         spin_lock_bh(&tp->lock);
6375         if (irq_sync)
6376                 tg3_irq_quiesce(tp);
6377 }
6378
6379 static inline void tg3_full_unlock(struct tg3 *tp)
6380 {
6381         spin_unlock_bh(&tp->lock);
6382 }
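/* A typical caller pattern for the helpers above, as used throughout this
 * driver.  toy_reconfig() is a hypothetical example; the lock helpers are
 * the real ones:
 */
static void toy_reconfig(struct tg3 *tp)
{
	tg3_full_lock(tp, 1);	/* irq_sync != 0: also quiesce IRQ handlers */
	/* ... reprogram the hardware while everything else is held off ... */
	tg3_full_unlock(tp);
}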
6383
6384 /* One-shot MSI handler - Chip automatically disables interrupt
6385  * after sending MSI so driver doesn't have to do it.
6386  */
6387 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6388 {
6389         struct tg3_napi *tnapi = dev_id;
6390         struct tg3 *tp = tnapi->tp;
6391
6392         prefetch(tnapi->hw_status);
6393         if (tnapi->rx_rcb)
6394                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6395
6396         if (likely(!tg3_irq_sync(tp)))
6397                 napi_schedule(&tnapi->napi);
6398
6399         return IRQ_HANDLED;
6400 }
6401
6402 /* MSI ISR - No need to check for interrupt sharing and no need to
6403  * flush status block and interrupt mailbox. PCI ordering rules
6404  * guarantee that MSI will arrive after the status block.
6405  */
6406 static irqreturn_t tg3_msi(int irq, void *dev_id)
6407 {
6408         struct tg3_napi *tnapi = dev_id;
6409         struct tg3 *tp = tnapi->tp;
6410
6411         prefetch(tnapi->hw_status);
6412         if (tnapi->rx_rcb)
6413                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6414         /*
6415          * Writing any value to intr-mbox-0 clears PCI INTA# and
6416          * chip-internal interrupt pending events.
6417          * Writing non-zero to intr-mbox-0 additionally tells the
6418          * NIC to stop sending us irqs, engaging "in-intr-handler"
6419          * event coalescing.
6420          */
6421         tw32_mailbox(tnapi->int_mbox, 0x00000001);
6422         if (likely(!tg3_irq_sync(tp)))
6423                 napi_schedule(&tnapi->napi);
6424
6425         return IRQ_RETVAL(1);
6426 }
6427
6428 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6429 {
6430         struct tg3_napi *tnapi = dev_id;
6431         struct tg3 *tp = tnapi->tp;
6432         struct tg3_hw_status *sblk = tnapi->hw_status;
6433         unsigned int handled = 1;
6434
6435         /* In INTx mode, it is possible for the interrupt to arrive at
6436          * the CPU before the status block that was posted prior to the
6437          * interrupt.  Reading the PCI State register will confirm whether
6438          * the interrupt is ours and will flush the status block.
6439          */
6440         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6441                 if (tg3_flag(tp, CHIP_RESETTING) ||
6442                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6443                         handled = 0;
6444                         goto out;
6445                 }
6446         }
6447
6448         /*
6449          * Writing any value to intr-mbox-0 clears PCI INTA# and
6450          * chip-internal interrupt pending events.
6451          * Writing non-zero to intr-mbox-0 additionally tells the
6452          * NIC to stop sending us irqs, engaging "in-intr-handler"
6453          * event coalescing.
6454          *
6455          * Flush the mailbox to de-assert the IRQ immediately to prevent
6456          * spurious interrupts.  The flush impacts performance but
6457          * excessive spurious interrupts can be worse in some cases.
6458          */
6459         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6460         if (tg3_irq_sync(tp))
6461                 goto out;
6462         sblk->status &= ~SD_STATUS_UPDATED;
6463         if (likely(tg3_has_work(tnapi))) {
6464                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6465                 napi_schedule(&tnapi->napi);
6466         } else {
6467                 /* No work, shared interrupt perhaps?  re-enable
6468                  * interrupts, and flush that PCI write
6469                  */
6470                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6471                                0x00000000);
6472         }
6473 out:
6474         return IRQ_RETVAL(handled);
6475 }
6476
6477 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6478 {
6479         struct tg3_napi *tnapi = dev_id;
6480         struct tg3 *tp = tnapi->tp;
6481         struct tg3_hw_status *sblk = tnapi->hw_status;
6482         unsigned int handled = 1;
6483
6484         /* In INTx mode, it is possible for the interrupt to arrive at
6485          * the CPU before the status block that was posted prior to the
6486          * interrupt.  Reading the PCI State register will confirm whether
6487          * the interrupt is ours and will flush the status block.
6488          */
6489         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6490                 if (tg3_flag(tp, CHIP_RESETTING) ||
6491                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6492                         handled = 0;
6493                         goto out;
6494                 }
6495         }
6496
6497         /*
6498          * Writing any value to intr-mbox-0 clears PCI INTA# and
6499          * chip-internal interrupt pending events.
6500          * Writing non-zero to intr-mbox-0 additionally tells the
6501          * NIC to stop sending us irqs, engaging "in-intr-handler"
6502          * event coalescing.
6503          *
6504          * Flush the mailbox to de-assert the IRQ immediately to prevent
6505          * spurious interrupts.  The flush impacts performance but
6506          * excessive spurious interrupts can be worse in some cases.
6507          */
6508         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6509
6510         /*
6511          * In a shared interrupt configuration, sometimes other devices'
6512          * interrupts will scream.  We record the current status tag here
6513          * so that the above check can report that the screaming interrupts
6514          * are unhandled.  Eventually they will be silenced.
6515          */
6516         tnapi->last_irq_tag = sblk->status_tag;
6517
6518         if (tg3_irq_sync(tp))
6519                 goto out;
6520
6521         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6522
6523         napi_schedule(&tnapi->napi);
6524
6525 out:
6526         return IRQ_RETVAL(handled);
6527 }
6528
6529 /* ISR for interrupt test */
6530 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6531 {
6532         struct tg3_napi *tnapi = dev_id;
6533         struct tg3 *tp = tnapi->tp;
6534         struct tg3_hw_status *sblk = tnapi->hw_status;
6535
6536         if ((sblk->status & SD_STATUS_UPDATED) ||
6537             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6538                 tg3_disable_ints(tp);
6539                 return IRQ_RETVAL(1);
6540         }
6541         return IRQ_RETVAL(0);
6542 }
6543
6544 #ifdef CONFIG_NET_POLL_CONTROLLER
6545 static void tg3_poll_controller(struct net_device *dev)
6546 {
6547         int i;
6548         struct tg3 *tp = netdev_priv(dev);
6549
6550         for (i = 0; i < tp->irq_cnt; i++)
6551                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
6552 }
6553 #endif
6554
6555 static void tg3_tx_timeout(struct net_device *dev)
6556 {
6557         struct tg3 *tp = netdev_priv(dev);
6558
6559         if (netif_msg_tx_err(tp)) {
6560                 netdev_err(dev, "transmit timed out, resetting\n");
6561                 tg3_dump_state(tp);
6562         }
6563
6564         tg3_reset_task_schedule(tp);
6565 }
6566
6567 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc. */
6568 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6569 {
6570         u32 base = (u32) mapping & 0xffffffff;
6571
6572         return (base > 0xffffdcc0) && (base + len + 8 < base);
6573 }
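/* Worked example for the test above: with base = 0xffffffc0 and len = 0x100,
 * the 32-bit sum base + len + 8 wraps past 2^32 down to 0xc8, which is below
 * base, so the buffer straddles a 4GB boundary and must take the workaround
 * path.  Stand-alone check (values illustrative):
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t base = 0xffffffc0u;
	uint32_t len = 0x100;

	assert(base > 0xffffdcc0u);	/* close enough to the boundary */
	assert(base + len + 8 < base);	/* 32-bit sum wrapped to 0xc8 */
	return 0;
}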
6574
6575 /* Test for DMA addresses > 40-bit */
6576 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6577                                           int len)
6578 {
6579 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6580         if (tg3_flag(tp, 40BIT_DMA_BUG))
6581                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
6582         return 0;
6583 #else
6584         return 0;
6585 #endif
6586 }
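/* The 40-bit test above flags any buffer whose end would exceed the 40-bit
 * DMA window.  Stand-alone sketch with an address just under 2^40 (the macro
 * mirrors DMA_BIT_MASK(); values illustrative):
 */
#include <assert.h>
#include <stdint.h>

#define TOY_DMA_BIT_MASK(n)	((1ull << (n)) - 1)

int main(void)
{
	uint64_t mapping = 0xffffffff00ull;	/* just below 2^40 */
	int len = 0x200;

	/* 0xffffffff00 + 0x200 = 0x10000000100 > 0xffffffffff */
	assert(mapping + len > TOY_DMA_BIT_MASK(40));
	return 0;
}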
6587
6588 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
6589                                  dma_addr_t mapping, u32 len, u32 flags,
6590                                  u32 mss, u32 vlan)
6591 {
6592         txbd->addr_hi = ((u64) mapping >> 32);
6593         txbd->addr_lo = ((u64) mapping & 0xffffffff);
6594         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6595         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
6596 }
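/* tg3_tx_set_bd() splits the 64-bit DMA address into two 32-bit descriptor
 * words and packs length/flags into a single word.  Stand-alone sketch of
 * that packing (TOY_LEN_SHIFT stands in for TXD_LEN_SHIFT; values
 * illustrative):
 */
#include <assert.h>
#include <stdint.h>

#define TOY_LEN_SHIFT	16

int main(void)
{
	uint64_t mapping = 0x0000001234567890ull;
	uint32_t len = 1514, flags = 0x0004;

	uint32_t addr_hi = (uint32_t)(mapping >> 32);		/* 0x12 */
	uint32_t addr_lo = (uint32_t)(mapping & 0xffffffff);	/* 0x34567890 */
	uint32_t len_flags = (len << TOY_LEN_SHIFT) | (flags & 0x0000ffff);

	assert(addr_hi == 0x12 && addr_lo == 0x34567890u);
	assert((len_flags >> TOY_LEN_SHIFT) == len);
	return 0;
}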
6597
6598 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
6599                             dma_addr_t map, u32 len, u32 flags,
6600                             u32 mss, u32 vlan)
6601 {
6602         struct tg3 *tp = tnapi->tp;
6603         bool hwbug = false;
6604
6605         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6606                 hwbug = true;
6607
6608         if (tg3_4g_overflow_test(map, len))
6609                 hwbug = true;
6610
6611         if (tg3_40bit_overflow_test(tp, map, len))
6612                 hwbug = true;
6613
6614         if (tp->dma_limit) {
6615                 u32 prvidx = *entry;
6616                 u32 tmp_flag = flags & ~TXD_FLAG_END;
6617                 while (len > tp->dma_limit && *budget) {
6618                         u32 frag_len = tp->dma_limit;
6619                         len -= tp->dma_limit;
6620
6621                         /* Avoid the 8-byte DMA problem */
6622                         if (len <= 8) {
6623                                 len += tp->dma_limit / 2;
6624                                 frag_len = tp->dma_limit / 2;
6625                         }
6626
6627                         tnapi->tx_buffers[*entry].fragmented = true;
6628
6629                         tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6630                                       frag_len, tmp_flag, mss, vlan);
6631                         *budget -= 1;
6632                         prvidx = *entry;
6633                         *entry = NEXT_TX(*entry);
6634
6635                         map += frag_len;
6636                 }
6637
6638                 if (len) {
6639                         if (*budget) {
6640                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6641                                               len, flags, mss, vlan);
6642                                 *budget -= 1;
6643                                 *entry = NEXT_TX(*entry);
6644                         } else {
6645                                 hwbug = true;
6646                                 tnapi->tx_buffers[prvidx].fragmented = false;
6647                         }
6648                 }
6649         } else {
6650                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6651                               len, flags, mss, vlan);
6652                 *entry = NEXT_TX(*entry);
6653         }
6654
6655         return hwbug;
6656 }
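/* Worked example of the dma_limit splitting loop above, assuming
 * dma_limit = 4096: a 4100-byte fragment would leave a 4-byte tail, so the
 * 8-byte workaround rebalances it into 2048 + 2052 bytes.  Stand-alone
 * arithmetic check:
 */
#include <assert.h>

int main(void)
{
	unsigned int dma_limit = 4096, len = 4100;
	unsigned int frag_len = dma_limit;

	len -= dma_limit;		/* tail would be 4 bytes */
	if (len <= 8) {			/* avoid the 8-byte DMA problem */
		len += dma_limit / 2;
		frag_len = dma_limit / 2;
	}
	assert(frag_len == 2048 && len == 2052);
	assert(frag_len + len == 4100);
	return 0;
}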
6657
6658 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
6659 {
6660         int i;
6661         struct sk_buff *skb;
6662         struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
6663
6664         skb = txb->skb;
6665         txb->skb = NULL;
6666
6667         pci_unmap_single(tnapi->tp->pdev,
6668                          dma_unmap_addr(txb, mapping),
6669                          skb_headlen(skb),
6670                          PCI_DMA_TODEVICE);
6671
6672         while (txb->fragmented) {
6673                 txb->fragmented = false;
6674                 entry = NEXT_TX(entry);
6675                 txb = &tnapi->tx_buffers[entry];
6676         }
6677
6678         for (i = 0; i <= last; i++) {
6679                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6680
6681                 entry = NEXT_TX(entry);
6682                 txb = &tnapi->tx_buffers[entry];
6683
6684                 pci_unmap_page(tnapi->tp->pdev,
6685                                dma_unmap_addr(txb, mapping),
6686                                skb_frag_size(frag), PCI_DMA_TODEVICE);
6687
6688                 while (txb->fragmented) {
6689                         txb->fragmented = false;
6690                         entry = NEXT_TX(entry);
6691                         txb = &tnapi->tx_buffers[entry];
6692                 }
6693         }
6694 }
6695
6696 /* Work around the 4GB and 40-bit hardware DMA bugs. */
6697 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
6698                                        struct sk_buff **pskb,
6699                                        u32 *entry, u32 *budget,
6700                                        u32 base_flags, u32 mss, u32 vlan)
6701 {
6702         struct tg3 *tp = tnapi->tp;
6703         struct sk_buff *new_skb, *skb = *pskb;
6704         dma_addr_t new_addr = 0;
6705         int ret = 0;
6706
6707         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
6708                 new_skb = skb_copy(skb, GFP_ATOMIC);
6709         else {
6710                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
6711
6712                 new_skb = skb_copy_expand(skb,
6713                                           skb_headroom(skb) + more_headroom,
6714                                           skb_tailroom(skb), GFP_ATOMIC);
6715         }
6716
6717         if (!new_skb) {
6718                 ret = -1;
6719         } else {
6720                 /* New SKB is guaranteed to be linear. */
6721                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
6722                                           PCI_DMA_TODEVICE);
6723                 /* Make sure the mapping succeeded */
6724                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
6725                         dev_kfree_skb(new_skb);
6726                         ret = -1;
6727                 } else {
6728                         u32 save_entry = *entry;
6729
6730                         base_flags |= TXD_FLAG_END;
6731
6732                         tnapi->tx_buffers[*entry].skb = new_skb;
6733                         dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
6734                                            mapping, new_addr);
6735
6736                         if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
6737                                             new_skb->len, base_flags,
6738                                             mss, vlan)) {
6739                                 tg3_tx_skb_unmap(tnapi, save_entry, -1);
6740                                 dev_kfree_skb(new_skb);
6741                                 ret = -1;
6742                         }
6743                 }
6744         }
6745
6746         dev_kfree_skb(skb);
6747         *pskb = new_skb;
6748         return ret;
6749 }
6750
6751 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6752
6753 /* Use GSO to work around a rare TSO bug that may be triggered when the
6754  * TSO header is greater than 80 bytes.
6755  */
6756 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6757 {
6758         struct sk_buff *segs, *nskb;
6759         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6760
6761         /* Estimate the number of fragments in the worst case */
6762         if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6763                 netif_stop_queue(tp->dev);
6764
6765                 /* netif_tx_stop_queue() must be done before checking
6766                  * tx index in tg3_tx_avail() below, because in
6767                  * tg3_tx(), we update tx index before checking for
6768                  * netif_tx_queue_stopped().
6769                  */
6770                 smp_mb();
6771                 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6772                         return NETDEV_TX_BUSY;
6773
6774                 netif_wake_queue(tp->dev);
6775         }
6776
6777         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6778         if (IS_ERR(segs))
6779                 goto tg3_tso_bug_end;
6780
6781         do {
6782                 nskb = segs;
6783                 segs = segs->next;
6784                 nskb->next = NULL;
6785                 tg3_start_xmit(nskb, tp->dev);
6786         } while (segs);
6787
6788 tg3_tso_bug_end:
6789         dev_kfree_skb(skb);
6790
6791         return NETDEV_TX_OK;
6792 }
6793
6794 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6795  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6796  */
6797 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6798 {
6799         struct tg3 *tp = netdev_priv(dev);
6800         u32 len, entry, base_flags, mss, vlan = 0;
6801         u32 budget;
6802         int i = -1, would_hit_hwbug;
6803         dma_addr_t mapping;
6804         struct tg3_napi *tnapi;
6805         struct netdev_queue *txq;
6806         unsigned int last;
6807
6808         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6809         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6810         if (tg3_flag(tp, ENABLE_TSS))
6811                 tnapi++;
6812
6813         budget = tg3_tx_avail(tnapi);
6814
6815         /* We are running in BH disabled context with netif_tx_lock
6816          * and TX reclaim runs via tp->napi.poll inside of a software
6817          * interrupt.  Furthermore, IRQ processing runs lockless so we have
6818          * no IRQ context deadlocks to worry about either.  Rejoice!
6819          */
6820         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
6821                 if (!netif_tx_queue_stopped(txq)) {
6822                         netif_tx_stop_queue(txq);
6823
6824                         /* This is a hard error, log it. */
6825                         netdev_err(dev,
6826                                    "BUG! Tx Ring full when queue awake!\n");
6827                 }
6828                 return NETDEV_TX_BUSY;
6829         }
6830
6831         entry = tnapi->tx_prod;
6832         base_flags = 0;
6833         if (skb->ip_summed == CHECKSUM_PARTIAL)
6834                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
6835
6836         mss = skb_shinfo(skb)->gso_size;
6837         if (mss) {
6838                 struct iphdr *iph;
6839                 u32 tcp_opt_len, hdr_len;
6840
6841                 if (skb_header_cloned(skb) &&
6842                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
6843                         goto drop;
6844
6845                 iph = ip_hdr(skb);
6846                 tcp_opt_len = tcp_optlen(skb);
6847
6848                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
6849
6850                 if (!skb_is_gso_v6(skb)) {
6851                         iph->check = 0;
6852                         iph->tot_len = htons(mss + hdr_len);
6853                 }
6854
6855                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6856                     tg3_flag(tp, TSO_BUG))
6857                         return tg3_tso_bug(tp, skb);
6858
6859                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
6860                                TXD_FLAG_CPU_POST_DMA);
6861
6862                 if (tg3_flag(tp, HW_TSO_1) ||
6863                     tg3_flag(tp, HW_TSO_2) ||
6864                     tg3_flag(tp, HW_TSO_3)) {
6865                         tcp_hdr(skb)->check = 0;
6866                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
6867                 } else
6868                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6869                                                                  iph->daddr, 0,
6870                                                                  IPPROTO_TCP,
6871                                                                  0);
6872
6873                 if (tg3_flag(tp, HW_TSO_3)) {
6874                         mss |= (hdr_len & 0xc) << 12;
6875                         if (hdr_len & 0x10)
6876                                 base_flags |= 0x00000010;
6877                         base_flags |= (hdr_len & 0x3e0) << 5;
6878                 } else if (tg3_flag(tp, HW_TSO_2))
6879                         mss |= hdr_len << 9;
6880                 else if (tg3_flag(tp, HW_TSO_1) ||
6881                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6882                         if (tcp_opt_len || iph->ihl > 5) {
6883                                 int tsflags;
6884
6885                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6886                                 mss |= (tsflags << 11);
6887                         }
6888                 } else {
6889                         if (tcp_opt_len || iph->ihl > 5) {
6890                                 int tsflags;
6891
6892                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6893                                 base_flags |= tsflags << 12;
6894                         }
6895                 }
6896         }
6897
6898         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6899             !mss && skb->len > VLAN_ETH_FRAME_LEN)
6900                 base_flags |= TXD_FLAG_JMB_PKT;
6901
6902         if (vlan_tx_tag_present(skb)) {
6903                 base_flags |= TXD_FLAG_VLAN;
6904                 vlan = vlan_tx_tag_get(skb);
6905         }
6906
6907         len = skb_headlen(skb);
6908
6909         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6910         if (pci_dma_mapping_error(tp->pdev, mapping))
6911                 goto drop;
6912
6913
6914         tnapi->tx_buffers[entry].skb = skb;
6915         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6916
6917         would_hit_hwbug = 0;
6918
6919         if (tg3_flag(tp, 5701_DMA_BUG))
6920                 would_hit_hwbug = 1;
6921
6922         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
6923                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
6924                             mss, vlan)) {
6925                 would_hit_hwbug = 1;
6926         /* Now loop through additional data fragments, and queue them. */
6927         } else if (skb_shinfo(skb)->nr_frags > 0) {
6928                 u32 tmp_mss = mss;
6929
6930                 if (!tg3_flag(tp, HW_TSO_1) &&
6931                     !tg3_flag(tp, HW_TSO_2) &&
6932                     !tg3_flag(tp, HW_TSO_3))
6933                         tmp_mss = 0;
6934
6935                 last = skb_shinfo(skb)->nr_frags - 1;
6936                 for (i = 0; i <= last; i++) {
6937                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6938
6939                         len = skb_frag_size(frag);
6940                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
6941                                                    len, DMA_TO_DEVICE);
6942
6943                         tnapi->tx_buffers[entry].skb = NULL;
6944                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6945                                            mapping);
6946                         if (dma_mapping_error(&tp->pdev->dev, mapping))
6947                                 goto dma_error;
6948
6949                         if (!budget ||
6950                             tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
6951                                             len, base_flags |
6952                                             ((i == last) ? TXD_FLAG_END : 0),
6953                                             tmp_mss, vlan)) {
6954                                 would_hit_hwbug = 1;
6955                                 break;
6956                         }
6957                 }
6958         }
6959
6960         if (would_hit_hwbug) {
6961                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
6962
6963                 /* If the workaround fails due to memory/mapping
6964                  * failure, silently drop this packet.
6965                  */
6966                 entry = tnapi->tx_prod;
6967                 budget = tg3_tx_avail(tnapi);
6968                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
6969                                                 base_flags, mss, vlan))
6970                         goto drop_nofree;
6971         }
6972
6973         skb_tx_timestamp(skb);
6974         netdev_sent_queue(tp->dev, skb->len);
6975
6976         /* Packets are ready, update Tx producer idx locally and on the card. */
6977         tw32_tx_mbox(tnapi->prodmbox, entry);
6978
6979         tnapi->tx_prod = entry;
6980         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6981                 netif_tx_stop_queue(txq);
6982
6983                 /* netif_tx_stop_queue() must be done before checking
6984                  * tx index in tg3_tx_avail() below, because in
6985                  * tg3_tx(), we update tx index before checking for
6986                  * netif_tx_queue_stopped().
6987                  */
6988                 smp_mb();
6989                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6990                         netif_tx_wake_queue(txq);
6991         }
6992
6993         mmiowb();
6994         return NETDEV_TX_OK;
6995
6996 dma_error:
6997         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
6998         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
6999 drop:
7000         dev_kfree_skb(skb);
7001 drop_nofree:
7002         tp->tx_dropped++;
7003         return NETDEV_TX_OK;
7004 }
7005
7006 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7007 {
7008         if (enable) {
7009                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7010                                   MAC_MODE_PORT_MODE_MASK);
7011
7012                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7013
7014                 if (!tg3_flag(tp, 5705_PLUS))
7015                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7016
7017                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7018                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7019                 else
7020                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7021         } else {
7022                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7023
7024                 if (tg3_flag(tp, 5705_PLUS) ||
7025                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7026                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7027                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7028         }
7029
7030         tw32(MAC_MODE, tp->mac_mode);
7031         udelay(40);
7032 }
7033
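     /* Put the link into loopback at the requested speed: external
      * loopback through the transceiver when extlpbk is set, otherwise
      * internal PHY loopback via BMCR_LOOPBACK, then match the MAC
      * port mode to the selected speed.
      */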
7034 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7035 {
7036         u32 val, bmcr, mac_mode, ptest = 0;
7037
7038         tg3_phy_toggle_apd(tp, false);
7039         tg3_phy_toggle_automdix(tp, 0);
7040
7041         if (extlpbk && tg3_phy_set_extloopbk(tp))
7042                 return -EIO;
7043
7044         bmcr = BMCR_FULLDPLX;
7045         switch (speed) {
7046         case SPEED_10:
7047                 break;
7048         case SPEED_100:
7049                 bmcr |= BMCR_SPEED100;
7050                 break;
7051         case SPEED_1000:
7052         default:
7053                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7054                         speed = SPEED_100;
7055                         bmcr |= BMCR_SPEED100;
7056                 } else {
7057                         speed = SPEED_1000;
7058                         bmcr |= BMCR_SPEED1000;
7059                 }
7060         }
7061
7062         if (extlpbk) {
7063                 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7064                         tg3_readphy(tp, MII_CTRL1000, &val);
7065                         val |= CTL1000_AS_MASTER |
7066                                CTL1000_ENABLE_MASTER;
7067                         tg3_writephy(tp, MII_CTRL1000, val);
7068                 } else {
7069                         ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7070                                 MII_TG3_FET_PTEST_TRIM_2;
7071                         tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7072                 }
7073         } else
7074                 bmcr |= BMCR_LOOPBACK;
7075
7076         tg3_writephy(tp, MII_BMCR, bmcr);
7077
7078         /* The write needs to be flushed for the FETs */
7079         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7080                 tg3_readphy(tp, MII_BMCR, &bmcr);
7081
7082         udelay(40);
7083
7084         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
7085             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
7086                 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
7087                              MII_TG3_FET_PTEST_FRC_TX_LINK |
7088                              MII_TG3_FET_PTEST_FRC_TX_LOCK);
7089
7090                 /* The write needs to be flushed for the AC131 */
7091                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
7092         }
7093
7094         /* Reset to avoid intermittently losing the first rx packet */
7095         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
7096             tg3_flag(tp, 5780_CLASS)) {
7097                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7098                 udelay(10);
7099                 tw32_f(MAC_RX_MODE, tp->rx_mode);
7100         }
7101
7102         mac_mode = tp->mac_mode &
7103                    ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
7104         if (speed == SPEED_1000)
7105                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
7106         else
7107                 mac_mode |= MAC_MODE_PORT_MODE_MII;
7108
7109         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
7110                 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
7111
7112                 if (masked_phy_id == TG3_PHY_ID_BCM5401)
7113                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
7114                 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
7115                         mac_mode |= MAC_MODE_LINK_POLARITY;
7116
7117                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
7118                              MII_TG3_EXT_CTRL_LNK3_LED_MODE);
7119         }
7120
7121         tw32(MAC_MODE, mac_mode);
7122         udelay(40);
7123
7124         return 0;
7125 }
7126
7127 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7128 {
7129         struct tg3 *tp = netdev_priv(dev);
7130
7131         if (features & NETIF_F_LOOPBACK) {
7132                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7133                         return;
7134
7135                 spin_lock_bh(&tp->lock);
7136                 tg3_mac_loopback(tp, true);
7137                 netif_carrier_on(tp->dev);
7138                 spin_unlock_bh(&tp->lock);
7139                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7140         } else {
7141                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7142                         return;
7143
7144                 spin_lock_bh(&tp->lock);
7145                 tg3_mac_loopback(tp, false);
7146                 /* Force link status check */
7147                 tg3_setup_phy(tp, 1);
7148                 spin_unlock_bh(&tp->lock);
7149                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7150         }
7151 }
7152
7153 static netdev_features_t tg3_fix_features(struct net_device *dev,
7154         netdev_features_t features)
7155 {
7156         struct tg3 *tp = netdev_priv(dev);
7157
7158         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7159                 features &= ~NETIF_F_ALL_TSO;
7160
7161         return features;
7162 }
7163
7164 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7165 {
7166         netdev_features_t changed = dev->features ^ features;
7167
7168         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7169                 tg3_set_loopback(dev, features);
7170
7171         return 0;
7172 }
7173
7174 static void tg3_rx_prodring_free(struct tg3 *tp,
7175                                  struct tg3_rx_prodring_set *tpr)
7176 {
7177         int i;
7178
7179         if (tpr != &tp->napi[0].prodring) {
7180                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7181                      i = (i + 1) & tp->rx_std_ring_mask)
7182                         tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7183                                         tp->rx_pkt_map_sz);
7184
7185                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7186                         for (i = tpr->rx_jmb_cons_idx;
7187                              i != tpr->rx_jmb_prod_idx;
7188                              i = (i + 1) & tp->rx_jmb_ring_mask) {
7189                                 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7190                                                 TG3_RX_JMB_MAP_SZ);
7191                         }
7192                 }
7193
7194                 return;
7195         }
7196
7197         for (i = 0; i <= tp->rx_std_ring_mask; i++)
7198                 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7199                                 tp->rx_pkt_map_sz);
7200
7201         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7202                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7203                         tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7204                                         TG3_RX_JMB_MAP_SZ);
7205         }
7206 }
7207
7208 /* Initialize rx rings for packet processing.
7209  *
7210  * The chip has been shut down and the driver detached from
7211  * the network stack, so no interrupts or new tx packets will
7212  * end up in the driver.  tp->{tx,}lock are held and thus
7213  * we may not sleep.
7214  */
7215 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7216                                  struct tg3_rx_prodring_set *tpr)
7217 {
7218         u32 i, rx_pkt_dma_sz;
7219
7220         tpr->rx_std_cons_idx = 0;
7221         tpr->rx_std_prod_idx = 0;
7222         tpr->rx_jmb_cons_idx = 0;
7223         tpr->rx_jmb_prod_idx = 0;
7224
7225         if (tpr != &tp->napi[0].prodring) {
7226                 memset(&tpr->rx_std_buffers[0], 0,
7227                        TG3_RX_STD_BUFF_RING_SIZE(tp));
7228                 if (tpr->rx_jmb_buffers)
7229                         memset(&tpr->rx_jmb_buffers[0], 0,
7230                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
7231                 goto done;
7232         }
7233
7234         /* Zero out all descriptors. */
7235         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7236
7237         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7238         if (tg3_flag(tp, 5780_CLASS) &&
7239             tp->dev->mtu > ETH_DATA_LEN)
7240                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7241         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7242
7243         /* Initialize invariants of the rings; we only set this
7244          * stuff once.  This works because the card does not
7245          * write into the rx buffer posting rings.
7246          */
7247         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7248                 struct tg3_rx_buffer_desc *rxd;
7249
7250                 rxd = &tpr->rx_std[i];
7251                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7252                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7253                 rxd->opaque = (RXD_OPAQUE_RING_STD |
7254                                (i << RXD_OPAQUE_INDEX_SHIFT));
7255         }
7256
7257         /* Now allocate fresh SKBs for each rx ring. */
7258         for (i = 0; i < tp->rx_pending; i++) {
7259                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
7260                         netdev_warn(tp->dev,
7261                                     "Using a smaller RX standard ring. Only "
7262                                     "%d out of %d buffers were allocated "
7263                                     "successfully\n", i, tp->rx_pending);
7264                         if (i == 0)
7265                                 goto initfail;
7266                         tp->rx_pending = i;
7267                         break;
7268                 }
7269         }
7270
7271         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7272                 goto done;
7273
7274         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7275
7276         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7277                 goto done;
7278
7279         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7280                 struct tg3_rx_buffer_desc *rxd;
7281
7282                 rxd = &tpr->rx_jmb[i].std;
7283                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7284                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7285                                   RXD_FLAG_JUMBO;
7286                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7287                        (i << RXD_OPAQUE_INDEX_SHIFT));
7288         }
7289
7290         for (i = 0; i < tp->rx_jumbo_pending; i++) {
7291                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
7292                         netdev_warn(tp->dev,
7293                                     "Using a smaller RX jumbo ring. Only %d "
7294                                     "out of %d buffers were allocated "
7295                                     "successfully\n", i, tp->rx_jumbo_pending);
7296                         if (i == 0)
7297                                 goto initfail;
7298                         tp->rx_jumbo_pending = i;
7299                         break;
7300                 }
7301         }
7302
7303 done:
7304         return 0;
7305
7306 initfail:
7307         tg3_rx_prodring_free(tp, tpr);
7308         return -ENOMEM;
7309 }
7310
7311 static void tg3_rx_prodring_fini(struct tg3 *tp,
7312                                  struct tg3_rx_prodring_set *tpr)
7313 {
7314         kfree(tpr->rx_std_buffers);
7315         tpr->rx_std_buffers = NULL;
7316         kfree(tpr->rx_jmb_buffers);
7317         tpr->rx_jmb_buffers = NULL;
7318         if (tpr->rx_std) {
7319                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7320                                   tpr->rx_std, tpr->rx_std_mapping);
7321                 tpr->rx_std = NULL;
7322         }
7323         if (tpr->rx_jmb) {
7324                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7325                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
7326                 tpr->rx_jmb = NULL;
7327         }
7328 }
7329
7330 static int tg3_rx_prodring_init(struct tg3 *tp,
7331                                 struct tg3_rx_prodring_set *tpr)
7332 {
7333         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7334                                       GFP_KERNEL);
7335         if (!tpr->rx_std_buffers)
7336                 return -ENOMEM;
7337
7338         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7339                                          TG3_RX_STD_RING_BYTES(tp),
7340                                          &tpr->rx_std_mapping,
7341                                          GFP_KERNEL);
7342         if (!tpr->rx_std)
7343                 goto err_out;
7344
7345         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7346                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7347                                               GFP_KERNEL);
7348                 if (!tpr->rx_jmb_buffers)
7349                         goto err_out;
7350
7351                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7352                                                  TG3_RX_JMB_RING_BYTES(tp),
7353                                                  &tpr->rx_jmb_mapping,
7354                                                  GFP_KERNEL);
7355                 if (!tpr->rx_jmb)
7356                         goto err_out;
7357         }
7358
7359         return 0;
7360
7361 err_out:
7362         tg3_rx_prodring_fini(tp, tpr);
7363         return -ENOMEM;
7364 }
7365
7366 /* Free up pending packets in all rx/tx rings.
7367  *
7368  * The chip has been shut down and the driver detached from
7369  * the network stack, so no interrupts or new tx packets will
7370  * end up in the driver.  tp->{tx,}lock is not held and we are not
7371  * in an interrupt context and thus may sleep.
7372  */
7373 static void tg3_free_rings(struct tg3 *tp)
7374 {
7375         int i, j;
7376
7377         for (j = 0; j < tp->irq_cnt; j++) {
7378                 struct tg3_napi *tnapi = &tp->napi[j];
7379
7380                 tg3_rx_prodring_free(tp, &tnapi->prodring);
7381
7382                 if (!tnapi->tx_buffers)
7383                         continue;
7384
7385                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7386                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7387
7388                         if (!skb)
7389                                 continue;
7390
7391                         tg3_tx_skb_unmap(tnapi, i,
7392                                          skb_shinfo(skb)->nr_frags - 1);
7393
7394                         dev_kfree_skb_any(skb);
7395                 }
7396         }
7397         netdev_reset_queue(tp->dev);
7398 }
7399
7400 /* Initialize tx/rx rings for packet processing.
7401  *
7402  * The chip has been shut down and the driver detached from
7403  * the network stack, so no interrupts or new tx packets will
7404  * end up in the driver.  tp->{tx,}lock are held and thus
7405  * we may not sleep.
7406  */
7407 static int tg3_init_rings(struct tg3 *tp)
7408 {
7409         int i;
7410
7411         /* Free up all the SKBs. */
7412         tg3_free_rings(tp);
7413
7414         for (i = 0; i < tp->irq_cnt; i++) {
7415                 struct tg3_napi *tnapi = &tp->napi[i];
7416
7417                 tnapi->last_tag = 0;
7418                 tnapi->last_irq_tag = 0;
7419                 tnapi->hw_status->status = 0;
7420                 tnapi->hw_status->status_tag = 0;
7421                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7422
7423                 tnapi->tx_prod = 0;
7424                 tnapi->tx_cons = 0;
7425                 if (tnapi->tx_ring)
7426                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7427
7428                 tnapi->rx_rcb_ptr = 0;
7429                 if (tnapi->rx_rcb)
7430                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7431
7432                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7433                         tg3_free_rings(tp);
7434                         return -ENOMEM;
7435                 }
7436         }
7437
7438         return 0;
7439 }
7440
7441 /*
7442  * Must not be invoked with interrupt sources disabled and
7443  * the hardware shut down.
7444  */
7445 static void tg3_free_consistent(struct tg3 *tp)
7446 {
7447         int i;
7448
7449         for (i = 0; i < tp->irq_cnt; i++) {
7450                 struct tg3_napi *tnapi = &tp->napi[i];
7451
7452                 if (tnapi->tx_ring) {
7453                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7454                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
7455                         tnapi->tx_ring = NULL;
7456                 }
7457
7458                 kfree(tnapi->tx_buffers);
7459                 tnapi->tx_buffers = NULL;
7460
7461                 if (tnapi->rx_rcb) {
7462                         dma_free_coherent(&tp->pdev->dev,
7463                                           TG3_RX_RCB_RING_BYTES(tp),
7464                                           tnapi->rx_rcb,
7465                                           tnapi->rx_rcb_mapping);
7466                         tnapi->rx_rcb = NULL;
7467                 }
7468
7469                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7470
7471                 if (tnapi->hw_status) {
7472                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7473                                           tnapi->hw_status,
7474                                           tnapi->status_mapping);
7475                         tnapi->hw_status = NULL;
7476                 }
7477         }
7478
7479         if (tp->hw_stats) {
7480                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
7481                                   tp->hw_stats, tp->stats_mapping);
7482                 tp->hw_stats = NULL;
7483         }
7484 }
7485
7486 /*
7487  * Must not be invoked with interrupt sources disabled and
7488  * the hardware shut down.  Can sleep.
7489  */
7490 static int tg3_alloc_consistent(struct tg3 *tp)
7491 {
7492         int i;
7493
7494         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
7495                                           sizeof(struct tg3_hw_stats),
7496                                           &tp->stats_mapping,
7497                                           GFP_KERNEL);
7498         if (!tp->hw_stats)
7499                 goto err_out;
7500
7501         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7502
7503         for (i = 0; i < tp->irq_cnt; i++) {
7504                 struct tg3_napi *tnapi = &tp->napi[i];
7505                 struct tg3_hw_status *sblk;
7506
7507                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
7508                                                       TG3_HW_STATUS_SIZE,
7509                                                       &tnapi->status_mapping,
7510                                                       GFP_KERNEL);
7511                 if (!tnapi->hw_status)
7512                         goto err_out;
7513
7514                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7515                 sblk = tnapi->hw_status;
7516
7517                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7518                         goto err_out;
7519
7520                 /* If multivector TSS is enabled, vector 0 does not handle
7521                  * tx interrupts.  Don't allocate any resources for it.
7522                  */
7523                 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
7524                     (i && tg3_flag(tp, ENABLE_TSS))) {
7525                         tnapi->tx_buffers = kzalloc(
7526                                                sizeof(struct tg3_tx_ring_info) *
7527                                                TG3_TX_RING_SIZE, GFP_KERNEL);
7528                         if (!tnapi->tx_buffers)
7529                                 goto err_out;
7530
7531                         tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7532                                                             TG3_TX_RING_BYTES,
7533                                                         &tnapi->tx_desc_mapping,
7534                                                             GFP_KERNEL);
7535                         if (!tnapi->tx_ring)
7536                                 goto err_out;
7537                 }
7538
7539                 /*
7540                  * When RSS is enabled, the status block format changes
7541                  * slightly.  The "rx_jumbo_consumer", "reserved",
7542                  * and "rx_mini_consumer" members get mapped to the
7543                  * other three rx return ring producer indexes.
7544                  */
7545                 switch (i) {
7546                 default:
7547                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
7548                         break;
7549                 case 2:
7550                         tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
7551                         break;
7552                 case 3:
7553                         tnapi->rx_rcb_prod_idx = &sblk->reserved;
7554                         break;
7555                 case 4:
7556                         tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
7557                         break;
7558                 }
7559
7560                 /*
7561                  * If multivector RSS is enabled, vector 0 does not handle
7562                  * rx or tx interrupts.  Don't allocate any resources for it.
7563                  */
7564                 if (!i && tg3_flag(tp, ENABLE_RSS))
7565                         continue;
7566
7567                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7568                                                    TG3_RX_RCB_RING_BYTES(tp),
7569                                                    &tnapi->rx_rcb_mapping,
7570                                                    GFP_KERNEL);
7571                 if (!tnapi->rx_rcb)
7572                         goto err_out;
7573
7574                 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7575         }
7576
7577         return 0;
7578
7579 err_out:
7580         tg3_free_consistent(tp);
7581         return -ENOMEM;
7582 }
7583
7584 #define MAX_WAIT_CNT 1000
7585
7586 /* To stop a block, clear the enable bit and poll till it
7587  * clears.  tp->lock is held.
7588  */
7589 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
7590 {
7591         unsigned int i;
7592         u32 val;
7593
7594         if (tg3_flag(tp, 5705_PLUS)) {
7595                 switch (ofs) {
7596                 case RCVLSC_MODE:
7597                 case DMAC_MODE:
7598                 case MBFREE_MODE:
7599                 case BUFMGR_MODE:
7600                 case MEMARB_MODE:
7601                         /* We can't enable/disable these bits on the
7602                          * 5705/5750, so just report success.
7603                          */
7604                         return 0;
7605
7606                 default:
7607                         break;
7608                 }
7609         }
7610
7611         val = tr32(ofs);
7612         val &= ~enable_bit;
7613         tw32_f(ofs, val);
7614
7615         for (i = 0; i < MAX_WAIT_CNT; i++) {
7616                 udelay(100);
7617                 val = tr32(ofs);
7618                 if ((val & enable_bit) == 0)
7619                         break;
7620         }
7621
7622         if (i == MAX_WAIT_CNT && !silent) {
7623                 dev_err(&tp->pdev->dev,
7624                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
7625                         ofs, enable_bit);
7626                 return -ENODEV;
7627         }
7628
7629         return 0;
7630 }
7631
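     /* Quiesce the data path: disable rx, stop the receive blocks,
      * then the send and DMA blocks, then host coalescing, the buffer
      * manager and the memory arbiter.
      */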
7632 /* tp->lock is held. */
7633 static int tg3_abort_hw(struct tg3 *tp, int silent)
7634 {
7635         int i, err;
7636
7637         tg3_disable_ints(tp);
7638
7639         tp->rx_mode &= ~RX_MODE_ENABLE;
7640         tw32_f(MAC_RX_MODE, tp->rx_mode);
7641         udelay(10);
7642
7643         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
7644         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
7645         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
7646         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
7647         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
7648         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
7649
7650         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
7651         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
7652         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
7653         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
7654         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
7655         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
7656         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
7657
7658         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
7659         tw32_f(MAC_MODE, tp->mac_mode);
7660         udelay(40);
7661
7662         tp->tx_mode &= ~TX_MODE_ENABLE;
7663         tw32_f(MAC_TX_MODE, tp->tx_mode);
7664
7665         for (i = 0; i < MAX_WAIT_CNT; i++) {
7666                 udelay(100);
7667                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
7668                         break;
7669         }
7670         if (i >= MAX_WAIT_CNT) {
7671                 dev_err(&tp->pdev->dev,
7672                         "%s timed out, TX_MODE_ENABLE will not clear "
7673                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
7674                 err |= -ENODEV;
7675         }
7676
7677         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
7678         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
7679         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
7680
7681         tw32(FTQ_RESET, 0xffffffff);
7682         tw32(FTQ_RESET, 0x00000000);
7683
7684         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
7685         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
7686
7687         for (i = 0; i < tp->irq_cnt; i++) {
7688                 struct tg3_napi *tnapi = &tp->napi[i];
7689                 if (tnapi->hw_status)
7690                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7691         }
7692
7693         return err;
7694 }
7695
7696 /* Save PCI command register before chip reset */
7697 static void tg3_save_pci_state(struct tg3 *tp)
7698 {
7699         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7700 }
7701
7702 /* Restore PCI state after chip reset */
7703 static void tg3_restore_pci_state(struct tg3 *tp)
7704 {
7705         u32 val;
7706
7707         /* Re-enable indirect register accesses. */
7708         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7709                                tp->misc_host_ctrl);
7710
7711         /* Set MAX PCI retry to zero. */
7712         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7713         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7714             tg3_flag(tp, PCIX_MODE))
7715                 val |= PCISTATE_RETRY_SAME_DMA;
7716         /* Allow reads and writes to the APE register and memory space. */
7717         if (tg3_flag(tp, ENABLE_APE))
7718                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7719                        PCISTATE_ALLOW_APE_SHMEM_WR |
7720                        PCISTATE_ALLOW_APE_PSPACE_WR;
7721         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7722
7723         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7724
7725         if (!tg3_flag(tp, PCI_EXPRESS)) {
7726                 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7727                                       tp->pci_cacheline_sz);
7728                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7729                                       tp->pci_lat_timer);
7730         }
7731
7732         /* Make sure PCI-X relaxed ordering bit is clear. */
7733         if (tg3_flag(tp, PCIX_MODE)) {
7734                 u16 pcix_cmd;
7735
7736                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7737                                      &pcix_cmd);
7738                 pcix_cmd &= ~PCI_X_CMD_ERO;
7739                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7740                                       pcix_cmd);
7741         }
7742
7743         if (tg3_flag(tp, 5780_CLASS)) {
7744
7745                 /* Chip reset on 5780 will reset MSI enable bit,
7746                  * so we need to restore it.
7747                  */
7748                 if (tg3_flag(tp, USING_MSI)) {
7749                         u16 ctrl;
7750
7751                         pci_read_config_word(tp->pdev,
7752                                              tp->msi_cap + PCI_MSI_FLAGS,
7753                                              &ctrl);
7754                         pci_write_config_word(tp->pdev,
7755                                               tp->msi_cap + PCI_MSI_FLAGS,
7756                                               ctrl | PCI_MSI_FLAGS_ENABLE);
7757                         val = tr32(MSGINT_MODE);
7758                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7759                 }
7760         }
7761 }
7762
7763 /* tp->lock is held. */
7764 static int tg3_chip_reset(struct tg3 *tp)
7765 {
7766         u32 val;
7767         void (*write_op)(struct tg3 *, u32, u32);
7768         int i, err;
7769
7770         tg3_nvram_lock(tp);
7771
7772         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7773
7774         /* No matching tg3_nvram_unlock() after this because
7775          * chip reset below will undo the nvram lock.
7776          */
7777         tp->nvram_lock_cnt = 0;
7778
7779         /* GRC_MISC_CFG core clock reset will clear the memory
7780          * enable bit in PCI register 4 and the MSI enable bit
7781          * on some chips, so we save relevant registers here.
7782          */
7783         tg3_save_pci_state(tp);
7784
7785         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7786             tg3_flag(tp, 5755_PLUS))
7787                 tw32(GRC_FASTBOOT_PC, 0);
7788
7789         /*
7790          * We must avoid the readl() that normally takes place.
7791          * It locks machines, causes machine checks, and other
7792          * fun things.  So, temporarily disable the 5701
7793          * hardware workaround while we do the reset.
7794          */
7795         write_op = tp->write32;
7796         if (write_op == tg3_write_flush_reg32)
7797                 tp->write32 = tg3_write32;
7798
7799         /* Prevent the irq handler from reading or writing PCI registers
7800          * during chip reset when the memory enable bit in the PCI command
7801          * register may be cleared.  The chip does not generate interrupts
7802          * at this time, but the irq handler may still be called due to irq
7803          * sharing or irqpoll.
7804          */
7805         tg3_flag_set(tp, CHIP_RESETTING);
7806         for (i = 0; i < tp->irq_cnt; i++) {
7807                 struct tg3_napi *tnapi = &tp->napi[i];
7808                 if (tnapi->hw_status) {
7809                         tnapi->hw_status->status = 0;
7810                         tnapi->hw_status->status_tag = 0;
7811                 }
7812                 tnapi->last_tag = 0;
7813                 tnapi->last_irq_tag = 0;
7814         }
7815         smp_mb();
7816
7817         for (i = 0; i < tp->irq_cnt; i++)
7818                 synchronize_irq(tp->napi[i].irq_vec);
7819
7820         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7821                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7822                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7823         }
7824
7825         /* do the reset */
7826         val = GRC_MISC_CFG_CORECLK_RESET;
7827
7828         if (tg3_flag(tp, PCI_EXPRESS)) {
7829                 /* Force PCIe 1.0a mode */
7830                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7831                     !tg3_flag(tp, 57765_PLUS) &&
7832                     tr32(TG3_PCIE_PHY_TSTCTL) ==
7833                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7834                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7835
7836                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7837                         tw32(GRC_MISC_CFG, (1 << 29));
7838                         val |= (1 << 29);
7839                 }
7840         }
7841
7842         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7843                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7844                 tw32(GRC_VCPU_EXT_CTRL,
7845                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7846         }
7847
7848         /* Manage gphy power for all PCIe devices lacking a CPMU. */
7849         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7850                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7851
7852         tw32(GRC_MISC_CFG, val);
7853
7854         /* restore 5701 hardware bug workaround write method */
7855         tp->write32 = write_op;
7856
7857         /* Unfortunately, we have to delay before the PCI read back.
7858          * Some 575X chips will not even respond to a PCI cfg access
7859          * when the reset command is given to the chip.
7860          *
7861          * How do these hardware designers expect things to work
7862          * properly if the PCI write is posted for a long period
7863          * of time?  Some way of reading a register back is always
7864          * needed to push out the posted write that performs the
7865          * reset.
7866          *
7867          * For most tg3 variants the trick below has worked.
7868          * Ho hum...
7869          */
7870         udelay(120);
7871
7872         /* Flush PCI posted writes.  The normal MMIO registers
7873          * are inaccessible at this time, so this is the only
7874          * way to do this reliably (actually, this is no longer
7875          * the case, see above).  I tried to use indirect
7876          * register read/write but this upset some 5701 variants.
7877          */
7878         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7879
7880         udelay(120);
7881
7882         if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
7883                 u16 val16;
7884
7885                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7886                         int i;
7887                         u32 cfg_val;
7888
7889                         /* Wait for link training to complete.  */
7890                         for (i = 0; i < 5000; i++)
7891                                 udelay(100);
7892
7893                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7894                         pci_write_config_dword(tp->pdev, 0xc4,
7895                                                cfg_val | (1 << 15));
7896                 }
7897
7898                 /* Clear the "no snoop" and "relaxed ordering" bits. */
7899                 pci_read_config_word(tp->pdev,
7900                                      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7901                                      &val16);
7902                 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7903                            PCI_EXP_DEVCTL_NOSNOOP_EN);
7904                 /*
7905                  * Older PCIe devices only support the 128 byte
7906                  * MPS setting.  Enforce the restriction.
7907                  */
7908                 if (!tg3_flag(tp, CPMU_PRESENT))
7909                         val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7910                 pci_write_config_word(tp->pdev,
7911                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7912                                       val16);
7913
7914                 /* Clear error status */
7915                 pci_write_config_word(tp->pdev,
7916                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
7917                                       PCI_EXP_DEVSTA_CED |
7918                                       PCI_EXP_DEVSTA_NFED |
7919                                       PCI_EXP_DEVSTA_FED |
7920                                       PCI_EXP_DEVSTA_URD);
7921         }
7922
7923         tg3_restore_pci_state(tp);
7924
7925         tg3_flag_clear(tp, CHIP_RESETTING);
7926         tg3_flag_clear(tp, ERROR_PROCESSED);
7927
7928         val = 0;
7929         if (tg3_flag(tp, 5780_CLASS))
7930                 val = tr32(MEMARB_MODE);
7931         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7932
7933         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7934                 tg3_stop_fw(tp);
7935                 tw32(0x5000, 0x400);
7936         }
7937
7938         tw32(GRC_MODE, tp->grc_mode);
7939
7940         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7941                 val = tr32(0xc4);
7942
7943                 tw32(0xc4, val | (1 << 15));
7944         }
7945
7946         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7947             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7948                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7949                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7950                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7951                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7952         }
7953
7954         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7955                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
7956                 val = tp->mac_mode;
7957         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7958                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
7959                 val = tp->mac_mode;
7960         } else
7961                 val = 0;
7962
7963         tw32_f(MAC_MODE, val);
7964         udelay(40);
7965
7966         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7967
7968         err = tg3_poll_fw(tp);
7969         if (err)
7970                 return err;
7971
7972         tg3_mdio_start(tp);
7973
7974         if (tg3_flag(tp, PCI_EXPRESS) &&
7975             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7976             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7977             !tg3_flag(tp, 57765_PLUS)) {
7978                 val = tr32(0x7c00);
7979
7980                 tw32(0x7c00, val | (1 << 25));
7981         }
7982
7983         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7984                 val = tr32(TG3_CPMU_CLCK_ORIDE);
7985                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7986         }
7987
7988         /* Reprobe ASF enable state.  */
7989         tg3_flag_clear(tp, ENABLE_ASF);
7990         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
7991         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7992         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7993                 u32 nic_cfg;
7994
7995                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7996                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7997                         tg3_flag_set(tp, ENABLE_ASF);
7998                         tp->last_event_jiffies = jiffies;
7999                         if (tg3_flag(tp, 5750_PLUS))
8000                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8001                 }
8002         }
8003
8004         return 0;
8005 }
8006
8007 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
8008                                                  struct rtnl_link_stats64 *);
8009 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *,
8010                                                 struct tg3_ethtool_stats *);
8011
8012 /* tp->lock is held. */
8013 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8014 {
8015         int err;
8016
8017         tg3_stop_fw(tp);
8018
8019         tg3_write_sig_pre_reset(tp, kind);
8020
8021         tg3_abort_hw(tp, silent);
8022         err = tg3_chip_reset(tp);
8023
8024         __tg3_set_mac_addr(tp, 0);
8025
8026         tg3_write_sig_legacy(tp, kind);
8027         tg3_write_sig_post_reset(tp, kind);
8028
8029         if (tp->hw_stats) {
8030                 /* Save the stats across chip resets... */
8031                 tg3_get_stats64(tp->dev, &tp->net_stats_prev);
8032                 tg3_get_estats(tp, &tp->estats_prev);
8033
8034                 /* And make sure the next sample is new data */
8035                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8036         }
8037
8038         if (err)
8039                 return err;
8040
8041         return 0;
8042 }
8043
8044 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8045 {
8046         struct tg3 *tp = netdev_priv(dev);
8047         struct sockaddr *addr = p;
8048         int err = 0, skip_mac_1 = 0;
8049
8050         if (!is_valid_ether_addr(addr->sa_data))
8051                 return -EINVAL;
8052
8053         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8054
8055         if (!netif_running(dev))
8056                 return 0;
8057
8058         if (tg3_flag(tp, ENABLE_ASF)) {
8059                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
8060
8061                 addr0_high = tr32(MAC_ADDR_0_HIGH);
8062                 addr0_low = tr32(MAC_ADDR_0_LOW);
8063                 addr1_high = tr32(MAC_ADDR_1_HIGH);
8064                 addr1_low = tr32(MAC_ADDR_1_LOW);
8065
8066                 /* Skip MAC addr 1 if ASF is using it. */
8067                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8068                     !(addr1_high == 0 && addr1_low == 0))
8069                         skip_mac_1 = 1;
8070         }
8071         spin_lock_bh(&tp->lock);
8072         __tg3_set_mac_addr(tp, skip_mac_1);
8073         spin_unlock_bh(&tp->lock);
8074
8075         return err;
8076 }
8077
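     /* A BDINFO block in NIC SRAM describes one ring to the hardware:
      * a 64-bit host DMA address written as two 32-bit halves, a
      * maxlen/flags word and, on pre-5705 chips, a NIC-local address.
      */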
8078 /* tp->lock is held. */
8079 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8080                            dma_addr_t mapping, u32 maxlen_flags,
8081                            u32 nic_addr)
8082 {
8083         tg3_write_mem(tp,
8084                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8085                       ((u64) mapping >> 32));
8086         tg3_write_mem(tp,
8087                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8088                       ((u64) mapping & 0xffffffff));
8089         tg3_write_mem(tp,
8090                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8091                        maxlen_flags);
8092
8093         if (!tg3_flag(tp, 5705_PLUS))
8094                 tg3_write_mem(tp,
8095                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8096                               nic_addr);
8097 }
8098
8099 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8100 {
8101         int i;
8102
8103         if (!tg3_flag(tp, ENABLE_TSS)) {
8104                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8105                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8106                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8107         } else {
8108                 tw32(HOSTCC_TXCOL_TICKS, 0);
8109                 tw32(HOSTCC_TXMAX_FRAMES, 0);
8110                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8111         }
8112
8113         if (!tg3_flag(tp, ENABLE_RSS)) {
8114                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8115                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8116                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8117         } else {
8118                 tw32(HOSTCC_RXCOL_TICKS, 0);
8119                 tw32(HOSTCC_RXMAX_FRAMES, 0);
8120                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8121         }
8122
8123         if (!tg3_flag(tp, 5705_PLUS)) {
8124                 u32 val = ec->stats_block_coalesce_usecs;
8125
8126                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8127                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8128
8129                 if (!netif_carrier_ok(tp->dev))
8130                         val = 0;
8131
8132                 tw32(HOSTCC_STAT_COAL_TICKS, val);
8133         }
8134
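             /* Coalescing registers for MSI-X vectors 1 and up are laid
              * out at a fixed 0x18-byte stride after the *_VEC1 registers.
              */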
8135         for (i = 0; i < tp->irq_cnt - 1; i++) {
8136                 u32 reg;
8137
8138                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8139                 tw32(reg, ec->rx_coalesce_usecs);
8140                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8141                 tw32(reg, ec->rx_max_coalesced_frames);
8142                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8143                 tw32(reg, ec->rx_max_coalesced_frames_irq);
8144
8145                 if (tg3_flag(tp, ENABLE_TSS)) {
8146                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8147                         tw32(reg, ec->tx_coalesce_usecs);
8148                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8149                         tw32(reg, ec->tx_max_coalesced_frames);
8150                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8151                         tw32(reg, ec->tx_max_coalesced_frames_irq);
8152                 }
8153         }
8154
8155         for (; i < tp->irq_max - 1; i++) {
8156                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8157                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8158                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8159
8160                 if (tg3_flag(tp, ENABLE_TSS)) {
8161                         tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8162                         tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8163                         tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8164                 }
8165         }
8166 }
8167
8168 /* tp->lock is held. */
8169 static void tg3_rings_reset(struct tg3 *tp)
8170 {
8171         int i;
8172         u32 stblk, txrcb, rxrcb, limit;
8173         struct tg3_napi *tnapi = &tp->napi[0];
8174
8175         /* Disable all transmit rings but the first. */
8176         if (!tg3_flag(tp, 5705_PLUS))
8177                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8178         else if (tg3_flag(tp, 5717_PLUS))
8179                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8180         else if (tg3_flag(tp, 57765_CLASS))
8181                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8182         else
8183                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8184
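             /* A ring is disabled by writing BDINFO_FLAGS_DISABLED into
              * the maxlen/flags word of its BDINFO block.
              */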
8185         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8186              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8187                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8188                               BDINFO_FLAGS_DISABLED);
8189
8190
8191         /* Disable all receive return rings but the first. */
8192         if (tg3_flag(tp, 5717_PLUS))
8193                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8194         else if (!tg3_flag(tp, 5705_PLUS))
8195                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8196         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8197                  tg3_flag(tp, 57765_CLASS))
8198                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8199         else
8200                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8201
8202         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8203              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8204                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8205                               BDINFO_FLAGS_DISABLED);
8206
8207         /* Disable interrupts */
8208         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8209         tp->napi[0].chk_msi_cnt = 0;
8210         tp->napi[0].last_rx_cons = 0;
8211         tp->napi[0].last_tx_cons = 0;
8212
8213         /* Zero mailbox registers. */
8214         if (tg3_flag(tp, SUPPORT_MSIX)) {
8215                 for (i = 1; i < tp->irq_max; i++) {
8216                         tp->napi[i].tx_prod = 0;
8217                         tp->napi[i].tx_cons = 0;
8218                         if (tg3_flag(tp, ENABLE_TSS))
8219                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
8220                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
8221                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8222                         tp->napi[i].chk_msi_cnt = 0;
8223                         tp->napi[i].last_rx_cons = 0;
8224                         tp->napi[i].last_tx_cons = 0;
8225                 }
8226                 if (!tg3_flag(tp, ENABLE_TSS))
8227                         tw32_mailbox(tp->napi[0].prodmbox, 0);
8228         } else {
8229                 tp->napi[0].tx_prod = 0;
8230                 tp->napi[0].tx_cons = 0;
8231                 tw32_mailbox(tp->napi[0].prodmbox, 0);
8232                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8233         }
8234
8235         /* Make sure the NIC-based send BD rings are disabled. */
8236         if (!tg3_flag(tp, 5705_PLUS)) {
8237                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8238                 for (i = 0; i < 16; i++)
8239                         tw32_tx_mbox(mbox + i * 8, 0);
8240         }
8241
8242         txrcb = NIC_SRAM_SEND_RCB;
8243         rxrcb = NIC_SRAM_RCV_RET_RCB;
8244
8245         /* Clear status block in ram. */
8246         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8247
8248         /* Set status block DMA address */
8249         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8250              ((u64) tnapi->status_mapping >> 32));
8251         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8252              ((u64) tnapi->status_mapping & 0xffffffff));
8253
8254         if (tnapi->tx_ring) {
8255                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8256                                (TG3_TX_RING_SIZE <<
8257                                 BDINFO_FLAGS_MAXLEN_SHIFT),
8258                                NIC_SRAM_TX_BUFFER_DESC);
8259                 txrcb += TG3_BDINFO_SIZE;
8260         }
8261
8262         if (tnapi->rx_rcb) {
8263                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8264                                (tp->rx_ret_ring_mask + 1) <<
8265                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8266                 rxrcb += TG3_BDINFO_SIZE;
8267         }
8268
8269         stblk = HOSTCC_STATBLCK_RING1;
8270
8271         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8272                 u64 mapping = (u64)tnapi->status_mapping;
8273                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8274                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8275
8276                 /* Clear status block in ram. */
8277                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8278
8279                 if (tnapi->tx_ring) {
8280                         tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8281                                        (TG3_TX_RING_SIZE <<
8282                                         BDINFO_FLAGS_MAXLEN_SHIFT),
8283                                        NIC_SRAM_TX_BUFFER_DESC);
8284                         txrcb += TG3_BDINFO_SIZE;
8285                 }
8286
8287                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8288                                ((tp->rx_ret_ring_mask + 1) <<
8289                                 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8290
8291                 stblk += 8;
8292                 rxrcb += TG3_BDINFO_SIZE;
8293         }
8294 }
8295
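     /* Program the rx replenish thresholds: the NIC requests more
      * buffers from the host once its cached BD count drops to roughly
      * the smaller of half the BD cache and 1/8 of the ring size.
      */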
8296 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8297 {
8298         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8299
8300         if (!tg3_flag(tp, 5750_PLUS) ||
8301             tg3_flag(tp, 5780_CLASS) ||
8302             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8303             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8304             tg3_flag(tp, 57765_PLUS))
8305                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8306         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8307                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8308                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8309         else
8310                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8311
8312         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8313         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8314
8315         val = min(nic_rep_thresh, host_rep_thresh);
8316         tw32(RCVBDI_STD_THRESH, val);
8317
8318         if (tg3_flag(tp, 57765_PLUS))
8319                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8320
8321         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8322                 return;
8323
8324         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8325
8326         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8327
8328         val = min(bdcache_maxcnt / 2, host_rep_thresh);
8329         tw32(RCVBDI_JUMBO_THRESH, val);
8330
8331         if (tg3_flag(tp, 57765_PLUS))
8332                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8333 }
8334
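     /* Bit-serial CRC-32 using the reflected Ethernet polynomial
      * 0xedb88320 (initial value all-ones, result inverted), matching
      * the CRC the MAC applies for the multicast hash filter below.
      */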
8335 static inline u32 calc_crc(unsigned char *buf, int len)
8336 {
8337         u32 reg;
8338         u32 tmp;
8339         int j, k;
8340
8341         reg = 0xffffffff;
8342
8343         for (j = 0; j < len; j++) {
8344                 reg ^= buf[j];
8345
8346                 for (k = 0; k < 8; k++) {
8347                         tmp = reg & 0x01;
8348
8349                         reg >>= 1;
8350
8351                         if (tmp)
8352                                 reg ^= 0xedb88320;
8353                 }
8354         }
8355
8356         return ~reg;
8357 }
8358
8359 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8360 {
8361         /* accept or reject all multicast frames */
8362         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8363         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8364         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8365         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8366 }
8367
8368 static void __tg3_set_rx_mode(struct net_device *dev)
8369 {
8370         struct tg3 *tp = netdev_priv(dev);
8371         u32 rx_mode;
8372
8373         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8374                                   RX_MODE_KEEP_VLAN_TAG);
8375
8376 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
8377         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8378          * flag clear.
8379          */
8380         if (!tg3_flag(tp, ENABLE_ASF))
8381                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8382 #endif
8383
8384         if (dev->flags & IFF_PROMISC) {
8385                 /* Promiscuous mode. */
8386                 rx_mode |= RX_MODE_PROMISC;
8387         } else if (dev->flags & IFF_ALLMULTI) {
8388                 /* Accept all multicast. */
8389                 tg3_set_multi(tp, 1);
8390         } else if (netdev_mc_empty(dev)) {
8391                 /* Reject all multicast. */
8392                 tg3_set_multi(tp, 0);
8393         } else {
8394                 /* Accept one or more multicast(s). */
8395                 struct netdev_hw_addr *ha;
8396                 u32 mc_filter[4] = { 0, };
8397                 u32 regidx;
8398                 u32 bit;
8399                 u32 crc;
8400
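                     /* The hash filter is 128 bits wide, spread across
                      * the four MAC_HASH_REG_x registers: bits 6..5 of
                      * the inverted CRC select the register, bits 4..0
                      * the bit within it.
                      */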
8401                 netdev_for_each_mc_addr(ha, dev) {
8402                         crc = calc_crc(ha->addr, ETH_ALEN);
8403                         bit = ~crc & 0x7f;
8404                         regidx = (bit & 0x60) >> 5;
8405                         bit &= 0x1f;
8406                         mc_filter[regidx] |= (1 << bit);
8407                 }
8408
8409                 tw32(MAC_HASH_REG_0, mc_filter[0]);
8410                 tw32(MAC_HASH_REG_1, mc_filter[1]);
8411                 tw32(MAC_HASH_REG_2, mc_filter[2]);
8412                 tw32(MAC_HASH_REG_3, mc_filter[3]);
8413         }
8414
8415         if (rx_mode != tp->rx_mode) {
8416                 tp->rx_mode = rx_mode;
8417                 tw32_f(MAC_RX_MODE, rx_mode);
8418                 udelay(10);
8419         }
8420 }
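
/* Illustrative sketch (not part of the original driver): the multicast
 * hash indexing used by __tg3_set_rx_mode() above, factored into a
 * hypothetical helper to document the bit layout.  The complement of the
 * low 7 CRC bits selects one of 128 filter bits; bits 6:5 pick one of the
 * four 32-bit MAC_HASH_REG_n registers and bits 4:0 pick the bit within
 * that register.
 */
static inline void tg3_mc_filter_set(u32 *mc_filter, const u8 *addr)
{
	u32 bit = ~calc_crc((unsigned char *)addr, ETH_ALEN) & 0x7f;

	mc_filter[(bit & 0x60) >> 5] |= 1 << (bit & 0x1f);
}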
8421
8422 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp)
8423 {
8424         int i;
8425
8426         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
8427                 tp->rss_ind_tbl[i] =
8428                         ethtool_rxfh_indir_default(i, tp->irq_cnt - 1);
8429 }
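
/* Note (an assumption about the ethtool helper, not stated here):
 * ethtool_rxfh_indir_default(i, n) evaluates to i % n, so the default
 * table simply spreads the TG3_RSS_INDIR_TBL_SIZE slots round-robin
 * across the tp->irq_cnt - 1 rx queues.
 */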
8430
8431 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
8432 {
8433         int i;
8434
8435         if (!tg3_flag(tp, SUPPORT_MSIX))
8436                 return;
8437
8438         if (tp->irq_cnt <= 2) {
8439                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
8440                 return;
8441         }
8442
8443         /* Validate table against current IRQ count */
8444         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8445                 if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
8446                         break;
8447         }
8448
8449         if (i != TG3_RSS_INDIR_TBL_SIZE)
8450                 tg3_rss_init_dflt_indir_tbl(tp);
8451 }
8452
8453 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
8454 {
8455         int i = 0;
8456         u32 reg = MAC_RSS_INDIR_TBL_0;
8457
8458         while (i < TG3_RSS_INDIR_TBL_SIZE) {
8459                 u32 val = tp->rss_ind_tbl[i];
8460                 i++;
8461                 for (; i % 8; i++) {
8462                         val <<= 4;
8463                         val |= tp->rss_ind_tbl[i];
8464                 }
8465                 tw32(reg, val);
8466                 reg += 4;
8467         }
8468 }
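
/* Note (added for clarity): the loop above packs eight 4-bit table
 * entries into each 32-bit register, first entry in the most significant
 * nibble, i.e. for the first register:
 *
 *	val = (tbl[0] << 28) | (tbl[1] << 24) | ... | (tbl[7] << 0);
 *
 * so the whole table occupies TG3_RSS_INDIR_TBL_SIZE / 8 registers
 * starting at MAC_RSS_INDIR_TBL_0.
 */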
8469
8470 /* tp->lock is held. */
8471 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8472 {
8473         u32 val, rdmac_mode;
8474         int i, err, limit;
8475         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8476
8477         tg3_disable_ints(tp);
8478
8479         tg3_stop_fw(tp);
8480
8481         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8482
8483         if (tg3_flag(tp, INIT_COMPLETE))
8484                 tg3_abort_hw(tp, 1);
8485
8486         /* Enable MAC control of LPI */
8487         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8488                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8489                        TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8490                        TG3_CPMU_EEE_LNKIDL_UART_IDL);
8491
8492                 tw32_f(TG3_CPMU_EEE_CTRL,
8493                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8494
8495                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8496                       TG3_CPMU_EEEMD_LPI_IN_TX |
8497                       TG3_CPMU_EEEMD_LPI_IN_RX |
8498                       TG3_CPMU_EEEMD_EEE_ENABLE;
8499
8500                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8501                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8502
8503                 if (tg3_flag(tp, ENABLE_APE))
8504                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8505
8506                 tw32_f(TG3_CPMU_EEE_MODE, val);
8507
8508                 tw32_f(TG3_CPMU_EEE_DBTMR1,
8509                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8510                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8511
8512                 tw32_f(TG3_CPMU_EEE_DBTMR2,
8513                        TG3_CPMU_DBTMR2_APE_TX_2047US |
8514                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8515         }
8516
8517         if (reset_phy)
8518                 tg3_phy_reset(tp);
8519
8520         err = tg3_chip_reset(tp);
8521         if (err)
8522                 return err;
8523
8524         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8525
8526         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8527                 val = tr32(TG3_CPMU_CTRL);
8528                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8529                 tw32(TG3_CPMU_CTRL, val);
8530
8531                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8532                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8533                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8534                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8535
8536                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8537                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8538                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8539                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8540
8541                 val = tr32(TG3_CPMU_HST_ACC);
8542                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8543                 val |= CPMU_HST_ACC_MACCLK_6_25;
8544                 tw32(TG3_CPMU_HST_ACC, val);
8545         }
8546
8547         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8548                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8549                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8550                        PCIE_PWR_MGMT_L1_THRESH_4MS;
8551                 tw32(PCIE_PWR_MGMT_THRESH, val);
8552
8553                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8554                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8555
8556                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8557
8558                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8559                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8560         }
8561
8562         if (tg3_flag(tp, L1PLLPD_EN)) {
8563                 u32 grc_mode = tr32(GRC_MODE);
8564
8565                 /* Access the lower 1K of PL PCIE block registers. */
8566                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8567                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8568
8569                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8570                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8571                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8572
8573                 tw32(GRC_MODE, grc_mode);
8574         }
8575
8576         if (tg3_flag(tp, 57765_CLASS)) {
8577                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8578                         u32 grc_mode = tr32(GRC_MODE);
8579
8580                         /* Access the lower 1K of PL PCIE block registers. */
8581                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8582                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8583
8584                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8585                                    TG3_PCIE_PL_LO_PHYCTL5);
8586                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8587                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8588
8589                         tw32(GRC_MODE, grc_mode);
8590                 }
8591
8592                 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8593                         u32 grc_mode = tr32(GRC_MODE);
8594
8595                         /* Access the lower 1K of DL PCIE block registers. */
8596                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8597                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8598
8599                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8600                                    TG3_PCIE_DL_LO_FTSMAX);
8601                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8602                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8603                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8604
8605                         tw32(GRC_MODE, grc_mode);
8606                 }
8607
8608                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8609                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8610                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8611                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8612         }
8613
8614         /* This works around an issue with Athlon chipsets on
8615          * B3 tigon3 silicon.  This bit has no effect on any
8616          * other revision.  But do not set this on PCI Express
8617          * chips and don't even touch the clocks if the CPMU is present.
8618          */
8619         if (!tg3_flag(tp, CPMU_PRESENT)) {
8620                 if (!tg3_flag(tp, PCI_EXPRESS))
8621                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8622                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8623         }
8624
8625         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8626             tg3_flag(tp, PCIX_MODE)) {
8627                 val = tr32(TG3PCI_PCISTATE);
8628                 val |= PCISTATE_RETRY_SAME_DMA;
8629                 tw32(TG3PCI_PCISTATE, val);
8630         }
8631
8632         if (tg3_flag(tp, ENABLE_APE)) {
8633                 /* Allow reads and writes to the
8634                  * APE register and memory space.
8635                  */
8636                 val = tr32(TG3PCI_PCISTATE);
8637                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8638                        PCISTATE_ALLOW_APE_SHMEM_WR |
8639                        PCISTATE_ALLOW_APE_PSPACE_WR;
8640                 tw32(TG3PCI_PCISTATE, val);
8641         }
8642
8643         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8644                 /* Enable some hw fixes.  */
8645                 val = tr32(TG3PCI_MSI_DATA);
8646                 val |= (1 << 26) | (1 << 28) | (1 << 29);
8647                 tw32(TG3PCI_MSI_DATA, val);
8648         }
8649
8650         /* Descriptor ring init may access the NIC SRAM area
8651          * to set up the TX descriptors, so we
8652          * can only do this after the hardware has been
8653          * successfully reset.
8654          */
8655         err = tg3_init_rings(tp);
8656         if (err)
8657                 return err;
8658
8659         if (tg3_flag(tp, 57765_PLUS)) {
8660                 val = tr32(TG3PCI_DMA_RW_CTRL) &
8661                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8662                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8663                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8664                 if (!tg3_flag(tp, 57765_CLASS) &&
8665                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8666                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
8667                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8668         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8669                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8670                 /* This value is determined during the probe-time DMA
8671                  * engine test, tg3_test_dma().
8672                  */
8673                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8674         }
8675
8676         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8677                           GRC_MODE_4X_NIC_SEND_RINGS |
8678                           GRC_MODE_NO_TX_PHDR_CSUM |
8679                           GRC_MODE_NO_RX_PHDR_CSUM);
8680         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8681
8682         /* Pseudo-header checksum is done by hardware logic and not
8683          * the offload processors, so make the chip do the pseudo-
8684          * header checksums on receive.  For transmit it is more
8685          * convenient to do the pseudo-header checksum in software
8686          * as Linux does that on transmit for us in all cases.
8687          */
8688         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8689
8690         tw32(GRC_MODE,
8691              tp->grc_mode |
8692              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8693
8694         /* Set up the timer prescaler register.  Clock is always 66 MHz. */
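	/* A prescaler value of 65 divides the 66 MHz core clock by
	 * (65 + 1), which should yield a 1 MHz (1 usec per tick) timer
	 * time base.
	 */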
8695         val = tr32(GRC_MISC_CFG);
8696         val &= ~0xff;
8697         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8698         tw32(GRC_MISC_CFG, val);
8699
8700         /* Initialize MBUF/DESC pool. */
8701         if (tg3_flag(tp, 5750_PLUS)) {
8702                 /* Do nothing.  */
8703         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8704                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8705                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8706                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8707                 else
8708                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8709                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8710                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8711         } else if (tg3_flag(tp, TSO_CAPABLE)) {
8712                 int fw_len;
8713
8714                 fw_len = tp->fw_len;
8715                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8716                 tw32(BUFMGR_MB_POOL_ADDR,
8717                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8718                 tw32(BUFMGR_MB_POOL_SIZE,
8719                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8720         }
8721
8722         if (tp->dev->mtu <= ETH_DATA_LEN) {
8723                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8724                      tp->bufmgr_config.mbuf_read_dma_low_water);
8725                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8726                      tp->bufmgr_config.mbuf_mac_rx_low_water);
8727                 tw32(BUFMGR_MB_HIGH_WATER,
8728                      tp->bufmgr_config.mbuf_high_water);
8729         } else {
8730                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8731                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8732                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8733                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8734                 tw32(BUFMGR_MB_HIGH_WATER,
8735                      tp->bufmgr_config.mbuf_high_water_jumbo);
8736         }
8737         tw32(BUFMGR_DMA_LOW_WATER,
8738              tp->bufmgr_config.dma_low_water);
8739         tw32(BUFMGR_DMA_HIGH_WATER,
8740              tp->bufmgr_config.dma_high_water);
8741
8742         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8743         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8744                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8745         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8746             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8747             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8748                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8749         tw32(BUFMGR_MODE, val);
8750         for (i = 0; i < 2000; i++) {
8751                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8752                         break;
8753                 udelay(10);
8754         }
8755         if (i >= 2000) {
8756                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8757                 return -ENODEV;
8758         }
8759
8760         if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8761                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8762
8763         tg3_setup_rxbd_thresholds(tp);
8764
8765         /* Initialize TG3_BDINFO's at:
8766          *  RCVDBDI_STD_BD:     standard eth size rx ring
8767          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
8768          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
8769          *
8770          * like so:
8771          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
8772          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
8773          *                              ring attribute flags
8774          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
8775          *
8776          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8777          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8778          *
8779          * The size of each ring is fixed in the firmware, but the location is
8780          * configurable.
8781          */
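	/* Reference sketch (offsets are an assumption based on tg3.h):
	 * each TG3_BDINFO control block is four consecutive 32-bit
	 * registers:
	 *
	 *	+0x0  TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH
	 *	+0x4  TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW
	 *	+0x8  TG3_BDINFO_MAXLEN_FLAGS
	 *	+0xc  TG3_BDINFO_NIC_ADDR
	 */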
8782         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8783              ((u64) tpr->rx_std_mapping >> 32));
8784         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8785              ((u64) tpr->rx_std_mapping & 0xffffffff));
8786         if (!tg3_flag(tp, 5717_PLUS))
8787                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8788                      NIC_SRAM_RX_BUFFER_DESC);
8789
8790         /* Disable the mini ring */
8791         if (!tg3_flag(tp, 5705_PLUS))
8792                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8793                      BDINFO_FLAGS_DISABLED);
8794
8795         /* Program the jumbo buffer descriptor ring control
8796          * blocks on those devices that have them.
8797          */
8798         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8799             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8800
8801                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8802                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8803                              ((u64) tpr->rx_jmb_mapping >> 32));
8804                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8805                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8806                         val = TG3_RX_JMB_RING_SIZE(tp) <<
8807                               BDINFO_FLAGS_MAXLEN_SHIFT;
8808                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8809                              val | BDINFO_FLAGS_USE_EXT_RECV);
8810                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8811                             tg3_flag(tp, 57765_CLASS))
8812                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8813                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8814                 } else {
8815                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8816                              BDINFO_FLAGS_DISABLED);
8817                 }
8818
8819                 if (tg3_flag(tp, 57765_PLUS)) {
8820                         val = TG3_RX_STD_RING_SIZE(tp);
8821                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8822                         val |= (TG3_RX_STD_DMA_SZ << 2);
8823                 } else
8824                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8825         } else
8826                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8827
8828         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8829
8830         tpr->rx_std_prod_idx = tp->rx_pending;
8831         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8832
8833         tpr->rx_jmb_prod_idx =
8834                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8835         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8836
8837         tg3_rings_reset(tp);
8838
8839         /* Initialize MAC address and backoff seed. */
8840         __tg3_set_mac_addr(tp, 0);
8841
8842         /* MTU + ethernet header + FCS + optional VLAN tag */
8843         tw32(MAC_RX_MTU_SIZE,
8844              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8845
8846         /* The slot time is changed by tg3_setup_phy if we
8847          * run at gigabit with half duplex.
8848          */
8849         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8850               (6 << TX_LENGTHS_IPG_SHIFT) |
8851               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8852
8853         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8854                 val |= tr32(MAC_TX_LENGTHS) &
8855                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
8856                         TX_LENGTHS_CNT_DWN_VAL_MSK);
8857
8858         tw32(MAC_TX_LENGTHS, val);
8859
8860         /* Receive rules. */
8861         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8862         tw32(RCVLPC_CONFIG, 0x0181);
8863
8864         /* Calculate the RDMAC_MODE setting early; we need it to determine
8865          * the RCVLPC_STATE_ENABLE mask.
8866          */
8867         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8868                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8869                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8870                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8871                       RDMAC_MODE_LNGREAD_ENAB);
8872
8873         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8874                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8875
8876         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8877             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8878             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8879                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8880                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8881                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8882
8883         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8884             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8885                 if (tg3_flag(tp, TSO_CAPABLE) &&
8886                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8887                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8888                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8889                            !tg3_flag(tp, IS_5788)) {
8890                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8891                 }
8892         }
8893
8894         if (tg3_flag(tp, PCI_EXPRESS))
8895                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8896
8897         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
8898                 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
8899
8900         if (tg3_flag(tp, HW_TSO_1) ||
8901             tg3_flag(tp, HW_TSO_2) ||
8902             tg3_flag(tp, HW_TSO_3))
8903                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8904
8905         if (tg3_flag(tp, 57765_PLUS) ||
8906             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8907             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8908                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8909
8910         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8911                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8912
8913         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8914             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8915             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8916             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8917             tg3_flag(tp, 57765_PLUS)) {
8918                 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8919                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8920                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8921                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8922                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8923                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8924                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8925                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8926                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8927                 }
8928                 tw32(TG3_RDMA_RSRVCTRL_REG,
8929                      val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8930         }
8931
8932         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8933             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8934                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8935                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8936                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8937                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8938         }
8939
8940         /* Receive/send statistics. */
8941         if (tg3_flag(tp, 5750_PLUS)) {
8942                 val = tr32(RCVLPC_STATS_ENABLE);
8943                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8944                 tw32(RCVLPC_STATS_ENABLE, val);
8945         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8946                    tg3_flag(tp, TSO_CAPABLE)) {
8947                 val = tr32(RCVLPC_STATS_ENABLE);
8948                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8949                 tw32(RCVLPC_STATS_ENABLE, val);
8950         } else {
8951                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8952         }
8953         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8954         tw32(SNDDATAI_STATSENAB, 0xffffff);
8955         tw32(SNDDATAI_STATSCTRL,
8956              (SNDDATAI_SCTRL_ENABLE |
8957               SNDDATAI_SCTRL_FASTUPD));
8958
8959         /* Set up the host coalescing engine. */
8960         tw32(HOSTCC_MODE, 0);
8961         for (i = 0; i < 2000; i++) {
8962                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8963                         break;
8964                 udelay(10);
8965         }
8966
8967         __tg3_set_coalesce(tp, &tp->coal);
8968
8969         if (!tg3_flag(tp, 5705_PLUS)) {
8970                 /* Status/statistics block address.  See tg3_timer,
8971                  * the tg3_periodic_fetch_stats call there, and
8972                  * tg3_get_stats to see how this works for 5705/5750 chips.
8973                  */
8974                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8975                      ((u64) tp->stats_mapping >> 32));
8976                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8977                      ((u64) tp->stats_mapping & 0xffffffff));
8978                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8979
8980                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8981
8982                 /* Clear statistics and status block memory areas */
8983                 for (i = NIC_SRAM_STATS_BLK;
8984                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8985                      i += sizeof(u32)) {
8986                         tg3_write_mem(tp, i, 0);
8987                         udelay(40);
8988                 }
8989         }
8990
8991         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8992
8993         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8994         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8995         if (!tg3_flag(tp, 5705_PLUS))
8996                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8997
8998         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8999                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
9000                 /* reset to prevent intermittently losing the first rx packet */
9001                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9002                 udelay(10);
9003         }
9004
9005         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9006                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9007                         MAC_MODE_FHDE_ENABLE;
9008         if (tg3_flag(tp, ENABLE_APE))
9009                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9010         if (!tg3_flag(tp, 5705_PLUS) &&
9011             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9012             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
9013                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9014         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9015         udelay(40);
9016
9017         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9018          * If TG3_FLAG_IS_NIC is zero, we should read the
9019          * register to preserve the GPIO settings for LOMs. The GPIOs,
9020          * whether used as inputs or outputs, are set by boot code after
9021          * reset.
9022          */
9023         if (!tg3_flag(tp, IS_NIC)) {
9024                 u32 gpio_mask;
9025
9026                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9027                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9028                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9029
9030                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9031                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9032                                      GRC_LCLCTRL_GPIO_OUTPUT3;
9033
9034                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9035                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9036
9037                 tp->grc_local_ctrl &= ~gpio_mask;
9038                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9039
9040                 /* GPIO1 must be driven high for eeprom write protect */
9041                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9042                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9043                                                GRC_LCLCTRL_GPIO_OUTPUT1);
9044         }
9045         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9046         udelay(100);
9047
9048         if (tg3_flag(tp, USING_MSIX)) {
9049                 val = tr32(MSGINT_MODE);
9050                 val |= MSGINT_MODE_ENABLE;
9051                 if (tp->irq_cnt > 1)
9052                         val |= MSGINT_MODE_MULTIVEC_EN;
9053                 if (!tg3_flag(tp, 1SHOT_MSI))
9054                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9055                 tw32(MSGINT_MODE, val);
9056         }
9057
9058         if (!tg3_flag(tp, 5705_PLUS)) {
9059                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9060                 udelay(40);
9061         }
9062
9063         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9064                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9065                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9066                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9067                WDMAC_MODE_LNGREAD_ENAB);
9068
9069         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9070             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9071                 if (tg3_flag(tp, TSO_CAPABLE) &&
9072                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
9073                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
9074                         /* nothing */
9075                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9076                            !tg3_flag(tp, IS_5788)) {
9077                         val |= WDMAC_MODE_RX_ACCEL;
9078                 }
9079         }
9080
9081         /* Enable host coalescing bug fix */
9082         if (tg3_flag(tp, 5755_PLUS))
9083                 val |= WDMAC_MODE_STATUS_TAG_FIX;
9084
9085         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9086                 val |= WDMAC_MODE_BURST_ALL_DATA;
9087
9088         tw32_f(WDMAC_MODE, val);
9089         udelay(40);
9090
9091         if (tg3_flag(tp, PCIX_MODE)) {
9092                 u16 pcix_cmd;
9093
9094                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9095                                      &pcix_cmd);
9096                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
9097                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9098                         pcix_cmd |= PCI_X_CMD_READ_2K;
9099                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9100                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9101                         pcix_cmd |= PCI_X_CMD_READ_2K;
9102                 }
9103                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9104                                       pcix_cmd);
9105         }
9106
9107         tw32_f(RDMAC_MODE, rdmac_mode);
9108         udelay(40);
9109
9110         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9111         if (!tg3_flag(tp, 5705_PLUS))
9112                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9113
9114         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9115                 tw32(SNDDATAC_MODE,
9116                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9117         else
9118                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9119
9120         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9121         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9122         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9123         if (tg3_flag(tp, LRG_PROD_RING_CAP))
9124                 val |= RCVDBDI_MODE_LRG_RING_SZ;
9125         tw32(RCVDBDI_MODE, val);
9126         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9127         if (tg3_flag(tp, HW_TSO_1) ||
9128             tg3_flag(tp, HW_TSO_2) ||
9129             tg3_flag(tp, HW_TSO_3))
9130                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9131         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9132         if (tg3_flag(tp, ENABLE_TSS))
9133                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9134         tw32(SNDBDI_MODE, val);
9135         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9136
9137         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9138                 err = tg3_load_5701_a0_firmware_fix(tp);
9139                 if (err)
9140                         return err;
9141         }
9142
9143         if (tg3_flag(tp, TSO_CAPABLE)) {
9144                 err = tg3_load_tso_firmware(tp);
9145                 if (err)
9146                         return err;
9147         }
9148
9149         tp->tx_mode = TX_MODE_ENABLE;
9150
9151         if (tg3_flag(tp, 5755_PLUS) ||
9152             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9153                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9154
9155         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9156                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9157                 tp->tx_mode &= ~val;
9158                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9159         }
9160
9161         tw32_f(MAC_TX_MODE, tp->tx_mode);
9162         udelay(100);
9163
9164         if (tg3_flag(tp, ENABLE_RSS)) {
9165                 tg3_rss_write_indir_tbl(tp);
9166
9167                 /* Set up the "secret" RSS hash key. */
9168                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9169                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9170                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9171                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9172                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9173                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9174                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9175                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9176                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9177                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9178         }
9179
9180         tp->rx_mode = RX_MODE_ENABLE;
9181         if (tg3_flag(tp, 5755_PLUS))
9182                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9183
9184         if (tg3_flag(tp, ENABLE_RSS))
9185                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9186                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
9187                                RX_MODE_RSS_IPV6_HASH_EN |
9188                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
9189                                RX_MODE_RSS_IPV4_HASH_EN |
9190                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
9191
9192         tw32_f(MAC_RX_MODE, tp->rx_mode);
9193         udelay(10);
9194
9195         tw32(MAC_LED_CTRL, tp->led_ctrl);
9196
9197         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9198         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9199                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9200                 udelay(10);
9201         }
9202         tw32_f(MAC_RX_MODE, tp->rx_mode);
9203         udelay(10);
9204
9205         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9206                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
9207                         !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9208                         /* Set the drive transmission level to 1.2V, but
9209                          * only if the signal pre-emphasis bit is not set. */
9210                         val = tr32(MAC_SERDES_CFG);
9211                         val &= 0xfffff000;
9212                         val |= 0x880;
9213                         tw32(MAC_SERDES_CFG, val);
9214                 }
9215                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9216                         tw32(MAC_SERDES_CFG, 0x616000);
9217         }
9218
9219         /* Prevent chip from dropping frames when flow control
9220          * is enabled.
9221          */
9222         if (tg3_flag(tp, 57765_CLASS))
9223                 val = 1;
9224         else
9225                 val = 2;
9226         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9227
9228         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9229             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9230                 /* Use hardware link auto-negotiation */
9231                 tg3_flag_set(tp, HW_AUTONEG);
9232         }
9233
9234         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9235             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9236                 u32 tmp;
9237
9238                 tmp = tr32(SERDES_RX_CTRL);
9239                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9240                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9241                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9242                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9243         }
9244
9245         if (!tg3_flag(tp, USE_PHYLIB)) {
9246                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
9247                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9248                         tp->link_config.speed = tp->link_config.orig_speed;
9249                         tp->link_config.duplex = tp->link_config.orig_duplex;
9250                         tp->link_config.autoneg = tp->link_config.orig_autoneg;
9251                 }
9252
9253                 err = tg3_setup_phy(tp, 0);
9254                 if (err)
9255                         return err;
9256
9257                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9258                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9259                         u32 tmp;
9260
9261                         /* Clear CRC stats. */
9262                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9263                                 tg3_writephy(tp, MII_TG3_TEST1,
9264                                              tmp | MII_TG3_TEST1_CRC_EN);
9265                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9266                         }
9267                 }
9268         }
9269
9270         __tg3_set_rx_mode(tp->dev);
9271
9272         /* Initialize receive rules. */
9273         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
9274         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9275         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
9276         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9277
9278         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9279                 limit = 8;
9280         else
9281                 limit = 16;
9282         if (tg3_flag(tp, ENABLE_ASF))
9283                 limit -= 4;
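
	/* The cases below fall through deliberately: entering at `limit'
	 * clears rule limit - 1 and every lower-numbered rule down to
	 * rule 4.  Rules 2 and 3 are left to the commented-out cases and
	 * rules 0 and 1 were programmed above.
	 */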
9284         switch (limit) {
9285         case 16:
9286                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
9287         case 15:
9288                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
9289         case 14:
9290                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
9291         case 13:
9292                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
9293         case 12:
9294                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
9295         case 11:
9296                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
9297         case 10:
9298                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
9299         case 9:
9300                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
9301         case 8:
9302                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
9303         case 7:
9304                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
9305         case 6:
9306                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
9307         case 5:
9308                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
9309         case 4:
9310                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
9311         case 3:
9312                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
9313         case 2:
9314         case 1:
9315
9316         default:
9317                 break;
9318         }
9319
9320         if (tg3_flag(tp, ENABLE_APE))
9321                 /* Write our heartbeat update interval to APE. */
9322                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9323                                 APE_HOST_HEARTBEAT_INT_DISABLE);
9324
9325         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9326
9327         return 0;
9328 }
9329
9330 /* Called at device open time to get the chip ready for
9331  * packet processing.  Invoked with tp->lock held.
9332  */
9333 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9334 {
9335         tg3_switch_clocks(tp);
9336
9337         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9338
9339         return tg3_reset_hw(tp, reset_phy);
9340 }
9341
9342 /* Restart hardware after configuration changes, self-test, etc.
9343  * Invoked with tp->lock held.
9344  */
9345 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
9346         __releases(tp->lock)
9347         __acquires(tp->lock)
9348 {
9349         int err;
9350
9351         err = tg3_init_hw(tp, reset_phy);
9352         if (err) {
9353                 netdev_err(tp->dev,
9354                            "Failed to re-initialize device, aborting\n");
9355                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9356                 tg3_full_unlock(tp);
9357                 del_timer_sync(&tp->timer);
9358                 tp->irq_sync = 0;
9359                 tg3_napi_enable(tp);
9360                 dev_close(tp->dev);
9361                 tg3_full_lock(tp, 0);
9362         }
9363         return err;
9364 }
9365
9366 static void tg3_reset_task(struct work_struct *work)
9367 {
9368         struct tg3 *tp = container_of(work, struct tg3, reset_task);
9369         int err;
9370
9371         tg3_full_lock(tp, 0);
9372
9373         if (!netif_running(tp->dev)) {
9374                 tg3_flag_clear(tp, RESET_TASK_PENDING);
9375                 tg3_full_unlock(tp);
9376                 return;
9377         }
9378
9379         tg3_full_unlock(tp);
9380
9381         tg3_phy_stop(tp);
9382
9383         tg3_netif_stop(tp);
9384
9385         tg3_full_lock(tp, 1);
9386
9387         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
9388                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
9389                 tp->write32_rx_mbox = tg3_write_flush_reg32;
9390                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
9391                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
9392         }
9393
9394         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
9395         err = tg3_init_hw(tp, 1);
9396         if (err)
9397                 goto out;
9398
9399         tg3_netif_start(tp);
9400
9401 out:
9402         tg3_full_unlock(tp);
9403
9404         if (!err)
9405                 tg3_phy_start(tp);
9406
9407         tg3_flag_clear(tp, RESET_TASK_PENDING);
9408 }
9409
9410 #define TG3_STAT_ADD32(PSTAT, REG) \
9411 do {    u32 __val = tr32(REG); \
9412         (PSTAT)->low += __val; \
9413         if ((PSTAT)->low < __val) \
9414                 (PSTAT)->high += 1; \
9415 } while (0)
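
/* Note (added for clarity): the hardware counters read by tr32() are only
 * 32 bits wide and wrap, so TG3_STAT_ADD32() accumulates them into a
 * 64-bit {high, low} pair.  The carry test relies on unsigned wraparound:
 * after (PSTAT)->low += __val, low < __val exactly when the 32-bit sum
 * overflowed, in which case high is incremented.
 */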
9416
9417 static void tg3_periodic_fetch_stats(struct tg3 *tp)
9418 {
9419         struct tg3_hw_stats *sp = tp->hw_stats;
9420
9421         if (!netif_carrier_ok(tp->dev))
9422                 return;
9423
9424         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
9425         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
9426         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
9427         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
9428         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
9429         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
9430         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
9431         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
9432         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
9433         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
9434         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
9435         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
9436         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
9437
9438         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
9439         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
9440         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
9441         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
9442         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
9443         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
9444         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
9445         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
9446         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
9447         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
9448         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
9449         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
9450         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
9451         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
9452
9453         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
9454         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9455             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
9456             tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
9457                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
9458         } else {
9459                 u32 val = tr32(HOSTCC_FLOW_ATTN);
9460                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
9461                 if (val) {
9462                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
9463                         sp->rx_discards.low += val;
9464                         if (sp->rx_discards.low < val)
9465                                 sp->rx_discards.high += 1;
9466                 }
9467                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
9468         }
9469         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
9470 }
9471
9472 static void tg3_chk_missed_msi(struct tg3 *tp)
9473 {
9474         u32 i;
9475
9476         for (i = 0; i < tp->irq_cnt; i++) {
9477                 struct tg3_napi *tnapi = &tp->napi[i];
9478
9479                 if (tg3_has_work(tnapi)) {
9480                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9481                             tnapi->last_tx_cons == tnapi->tx_cons) {
9482                                 if (tnapi->chk_msi_cnt < 1) {
9483                                         tnapi->chk_msi_cnt++;
9484                                         return;
9485                                 }
9486                                 tg3_msi(0, tnapi);
9487                         }
9488                 }
9489                 tnapi->chk_msi_cnt = 0;
9490                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9491                 tnapi->last_tx_cons = tnapi->tx_cons;
9492         }
9493 }
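
/* Note (added for clarity): the function above works around lost MSIs.
 * If a vector has work pending but its rx/tx consumer indices have not
 * advanced since the previous timer tick (chk_msi_cnt lets one tick pass
 * first), the interrupt is presumed lost and tg3_msi() is called directly
 * to restart processing.
 */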
9494
9495 static void tg3_timer(unsigned long __opaque)
9496 {
9497         struct tg3 *tp = (struct tg3 *) __opaque;
9498
9499         if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
9500                 goto restart_timer;
9501
9502         spin_lock(&tp->lock);
9503
9504         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9505             tg3_flag(tp, 57765_CLASS))
9506                 tg3_chk_missed_msi(tp);
9507
9508         if (!tg3_flag(tp, TAGGED_STATUS)) {
9509                 /* All of this garbage exists because, when using
9510                  * non-tagged IRQ status, the mailbox/status_block
9511                  * protocol the chip uses with the CPU is race prone.
9512                  */
9513                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9514                         tw32(GRC_LOCAL_CTRL,
9515                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9516                 } else {
9517                         tw32(HOSTCC_MODE, tp->coalesce_mode |
9518                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9519                 }
9520
9521                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9522                         spin_unlock(&tp->lock);
9523                         tg3_reset_task_schedule(tp);
9524                         goto restart_timer;
9525                 }
9526         }
9527
9528         /* This part only runs once per second. */
9529         if (!--tp->timer_counter) {
9530                 if (tg3_flag(tp, 5705_PLUS))
9531                         tg3_periodic_fetch_stats(tp);
9532
9533                 if (tp->setlpicnt && !--tp->setlpicnt)
9534                         tg3_phy_eee_enable(tp);
9535
9536                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
9537                         u32 mac_stat;
9538                         int phy_event;
9539
9540                         mac_stat = tr32(MAC_STATUS);
9541
9542                         phy_event = 0;
9543                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9544                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9545                                         phy_event = 1;
9546                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9547                                 phy_event = 1;
9548
9549                         if (phy_event)
9550                                 tg3_setup_phy(tp, 0);
9551                 } else if (tg3_flag(tp, POLL_SERDES)) {
9552                         u32 mac_stat = tr32(MAC_STATUS);
9553                         int need_setup = 0;
9554
9555                         if (netif_carrier_ok(tp->dev) &&
9556                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9557                                 need_setup = 1;
9558                         }
9559                         if (!netif_carrier_ok(tp->dev) &&
9560                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
9561                                          MAC_STATUS_SIGNAL_DET))) {
9562                                 need_setup = 1;
9563                         }
9564                         if (need_setup) {
9565                                 if (!tp->serdes_counter) {
9566                                         tw32_f(MAC_MODE,
9567                                              (tp->mac_mode &
9568                                               ~MAC_MODE_PORT_MODE_MASK));
9569                                         udelay(40);
9570                                         tw32_f(MAC_MODE, tp->mac_mode);
9571                                         udelay(40);
9572                                 }
9573                                 tg3_setup_phy(tp, 0);
9574                         }
9575                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9576                            tg3_flag(tp, 5780_CLASS)) {
9577                         tg3_serdes_parallel_detect(tp);
9578                 }
9579
9580                 tp->timer_counter = tp->timer_multiplier;
9581         }
9582
9583         /* Heartbeat is only sent once every 2 seconds.
9584          *
9585          * The heartbeat is to tell the ASF firmware that the host
9586          * driver is still alive.  In the event that the OS crashes,
9587          * ASF needs to reset the hardware to free up the FIFO space
9588          * that may be filled with rx packets destined for the host.
9589          * If the FIFO is full, ASF will no longer function properly.
9590          *
9591          * Unintended resets have been reported on real-time kernels,
9592          * where the timer doesn't run on time.  Netpoll will also have
9593          * the same problem.
9594          *
9595          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9596          * to check the ring condition when the heartbeat is expiring
9597          * before doing the reset.  This will prevent most unintended
9598          * resets.
9599          */
9600         if (!--tp->asf_counter) {
9601                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9602                         tg3_wait_for_event_ack(tp);
9603
9604                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9605                                       FWCMD_NICDRV_ALIVE3);
9606                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9607                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9608                                       TG3_FW_UPDATE_TIMEOUT_SEC);
9609
9610                         tg3_generate_fw_event(tp);
9611                 }
9612                 tp->asf_counter = tp->asf_multiplier;
9613         }
9614
9615         spin_unlock(&tp->lock);
9616
9617 restart_timer:
9618         tp->timer.expires = jiffies + tp->timer_offset;
9619         add_timer(&tp->timer);
9620 }
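
/* Note (added for clarity): tg3_timer() re-arms itself every
 * tp->timer_offset jiffies.  tp->timer_counter/timer_multiplier divide
 * that base tick down to the once-per-second work above, and
 * tp->asf_counter/asf_multiplier down to the two-second ASF heartbeat.
 */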
9621
9622 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9623 {
9624         irq_handler_t fn;
9625         unsigned long flags;
9626         char *name;
9627         struct tg3_napi *tnapi = &tp->napi[irq_num];
9628
9629         if (tp->irq_cnt == 1)
9630                 name = tp->dev->name;
9631         else {
9632                 name = &tnapi->irq_lbl[0];
9633                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9634                 name[IFNAMSIZ-1] = 0;
9635         }
9636
9637         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9638                 fn = tg3_msi;
9639                 if (tg3_flag(tp, 1SHOT_MSI))
9640                         fn = tg3_msi_1shot;
9641                 flags = 0;
9642         } else {
9643                 fn = tg3_interrupt;
9644                 if (tg3_flag(tp, TAGGED_STATUS))
9645                         fn = tg3_interrupt_tagged;
9646                 flags = IRQF_SHARED;
9647         }
9648
9649         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9650 }
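
/* Note (added for clarity), summarizing the handler selection above:
 * MSI/MSI-X vectors get tg3_msi (or tg3_msi_1shot when 1SHOT_MSI is set)
 * with no IRQ flags; legacy INTx gets tg3_interrupt (or
 * tg3_interrupt_tagged when TAGGED_STATUS is set), registered IRQF_SHARED.
 */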
9651
9652 static int tg3_test_interrupt(struct tg3 *tp)
9653 {
9654         struct tg3_napi *tnapi = &tp->napi[0];
9655         struct net_device *dev = tp->dev;
9656         int err, i, intr_ok = 0;
9657         u32 val;
9658
9659         if (!netif_running(dev))
9660                 return -ENODEV;
9661
9662         tg3_disable_ints(tp);
9663
9664         free_irq(tnapi->irq_vec, tnapi);
9665
9666         /*
9667          * Turn off MSI one-shot mode.  Otherwise this test has no
9668          * way to observe whether the interrupt was delivered.
9669          */
9670         if (tg3_flag(tp, 57765_PLUS)) {
9671                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9672                 tw32(MSGINT_MODE, val);
9673         }
9674
9675         err = request_irq(tnapi->irq_vec, tg3_test_isr,
9676                           IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
9677         if (err)
9678                 return err;
9679
9680         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9681         tg3_enable_ints(tp);
9682
9683         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9684                tnapi->coal_now);
9685
9686         for (i = 0; i < 5; i++) {
9687                 u32 int_mbox, misc_host_ctrl;
9688
9689                 int_mbox = tr32_mailbox(tnapi->int_mbox);
9690                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9691
9692                 if ((int_mbox != 0) ||
9693                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9694                         intr_ok = 1;
9695                         break;
9696                 }
9697
9698                 if (tg3_flag(tp, 57765_PLUS) &&
9699                     tnapi->hw_status->status_tag != tnapi->last_tag)
9700                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
9701
9702                 msleep(10);
9703         }
9704
9705         tg3_disable_ints(tp);
9706
9707         free_irq(tnapi->irq_vec, tnapi);
9708
9709         err = tg3_request_irq(tp, 0);
9710
9711         if (err)
9712                 return err;
9713
9714         if (intr_ok) {
9715                 /* Reenable MSI one shot mode. */
9716                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
9717                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9718                         tw32(MSGINT_MODE, val);
9719                 }
9720                 return 0;
9721         }
9722
9723         return -EIO;
9724 }
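
/* The self-test above follows a common pattern: release the production
 * handler, install a throwaway ISR, force an interrupt, and poll for
 * evidence of delivery.  A minimal sketch (names from this driver, the
 * ~50 ms budget matching the 5 x msleep(10) loop):
 *
 *      free_irq(vec, tnapi);
 *      request_irq(vec, tg3_test_isr, IRQF_SHARED, dev->name, tnapi);
 *      tg3_enable_ints(tp);
 *      tw32_f(HOSTCC_MODE, ... | tnapi->coal_now);  // force coalescing now
 *      // poll up to ~50 ms for int_mbox != 0 or a masked PCI interrupt
 *      free_irq(vec, tnapi);
 *      tg3_request_irq(tp, 0);                      // restore real handler
 *
 * One-shot MSI is disabled for the duration because in that mode the
 * hardware clears the evidence before the poll loop could observe it.
 */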
9725
9726 /* Returns 0 if the MSI test succeeds, or if the MSI test fails
9727  * but INTx mode is successfully restored.
9728  */
9729 static int tg3_test_msi(struct tg3 *tp)
9730 {
9731         int err;
9732         u16 pci_cmd;
9733
9734         if (!tg3_flag(tp, USING_MSI))
9735                 return 0;
9736
9737         /* Turn off SERR reporting in case MSI terminates with Master
9738          * Abort.
9739          */
9740         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9741         pci_write_config_word(tp->pdev, PCI_COMMAND,
9742                               pci_cmd & ~PCI_COMMAND_SERR);
9743
9744         err = tg3_test_interrupt(tp);
9745
9746         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9747
9748         if (!err)
9749                 return 0;
9750
9751         /* other failures */
9752         if (err != -EIO)
9753                 return err;
9754
9755         /* MSI test failed, go back to INTx mode */
9756         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9757                     "to INTx mode. Please report this failure to the PCI "
9758                     "maintainer and include system chipset information\n");
9759
9760         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9761
9762         pci_disable_msi(tp->pdev);
9763
9764         tg3_flag_clear(tp, USING_MSI);
9765         tp->napi[0].irq_vec = tp->pdev->irq;
9766
9767         err = tg3_request_irq(tp, 0);
9768         if (err)
9769                 return err;
9770
9771         /* Need to reset the chip because the MSI cycle may have terminated
9772          * with Master Abort.
9773          */
9774         tg3_full_lock(tp, 1);
9775
9776         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9777         err = tg3_init_hw(tp, 1);
9778
9779         tg3_full_unlock(tp);
9780
9781         if (err)
9782                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9783
9784         return err;
9785 }
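
/* tg3_test_msi() is the usual MSI fallback recipe: try MSI, and if
 * delivery cannot be demonstrated, revert to INTx rather than run with a
 * dead interrupt.  The recovery path, condensed (all calls as used above):
 *
 *      free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
 *      pci_disable_msi(tp->pdev);              // back to legacy INTx
 *      tg3_flag_clear(tp, USING_MSI);
 *      tp->napi[0].irq_vec = tp->pdev->irq;    // INTx line from PCI core
 *      tg3_request_irq(tp, 0);
 *      // ...then a full chip reset, since the failed MSI cycle may have
 *      // terminated with a Master Abort
 *
 * SERR reporting is masked around the test so that a Master Abort during
 * the experiment does not escalate into a system error.
 */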
9786
9787 static int tg3_request_firmware(struct tg3 *tp)
9788 {
9789         const __be32 *fw_data;
9790
9791         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9792                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9793                            tp->fw_needed);
9794                 return -ENOENT;
9795         }
9796
9797         fw_data = (void *)tp->fw->data;
9798
9799         /* Firmware blob starts with version numbers, followed by
9800          * start address and _full_ length including BSS sections
9801          * (which must be longer than the actual data, of course).
9802          */
9803
9804         tp->fw_len = be32_to_cpu(fw_data[2]);   /* includes bss */
9805         if (tp->fw_len < (tp->fw->size - 12)) {
9806                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9807                            tp->fw_len, tp->fw_needed);
9808                 release_firmware(tp->fw);
9809                 tp->fw = NULL;
9810                 return -EINVAL;
9811         }
9812
9813         /* Firmware is loaded; we no longer need to request it. */
9814         tp->fw_needed = NULL;
9815         return 0;
9816 }
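
/* Layout of the firmware blob as parsed above, with fw_data pointing at
 * tp->fw->data (all words big-endian):
 *
 *      fw_data[0]      version
 *      fw_data[1]      load (start) address
 *      fw_data[2]      full image length, including BSS
 *      fw_data[3..]    text/data payload (tp->fw->size - 12 bytes)
 *
 * The sanity check accepts fw_len >= payload size because BSS occupies
 * space in the target RAM but contributes no bytes to the file.
 */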
9817
9818 static bool tg3_enable_msix(struct tg3 *tp)
9819 {
9820         int i, rc;
9821         struct msix_entry msix_ent[tp->irq_max];
9822
9823         tp->irq_cnt = num_online_cpus();
9824         if (tp->irq_cnt > 1) {
9825                 /* We want as many rx rings enabled as there are cpus.
9826                  * In multiqueue MSI-X mode, the first MSI-X vector
9827                  * only deals with link interrupts, etc., so we add
9828                  * one to the number of vectors we are requesting.
9829                  */
9830                 tp->irq_cnt = min_t(unsigned, tp->irq_cnt + 1, tp->irq_max);
9831         }
9832
9833         for (i = 0; i < tp->irq_max; i++) {
9834                 msix_ent[i].entry  = i;
9835                 msix_ent[i].vector = 0;
9836         }
9837
9838         rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9839         if (rc < 0) {
9840                 return false;
9841         } else if (rc != 0) {
9842                 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9843                         return false;
9844                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9845                               tp->irq_cnt, rc);
9846                 tp->irq_cnt = rc;
9847         }
9848
9849         for (i = 0; i < tp->irq_max; i++)
9850                 tp->napi[i].irq_vec = msix_ent[i].vector;
9851
9852         netif_set_real_num_tx_queues(tp->dev, 1);
9853         rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9854         if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9855                 pci_disable_msix(tp->pdev);
9856                 return false;
9857         }
9858
9859         if (tp->irq_cnt > 1) {
9860                 tg3_flag_set(tp, ENABLE_RSS);
9861
9862                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9863                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9864                         tg3_flag_set(tp, ENABLE_TSS);
9865                         netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9866                 }
9867         }
9868
9869         return true;
9870 }
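
/* pci_enable_msix() here follows the old-style contract: a negative return
 * is a hard failure, zero is success, and a positive return is the number
 * of vectors actually available, inviting a retry with the smaller count.
 * Condensed sketch of the negotiation above:
 *
 *      rc = pci_enable_msix(pdev, msix_ent, wanted);
 *      if (rc < 0)
 *              return false;           // no MSI-X at all
 *      if (rc != 0) {
 *              if (pci_enable_msix(pdev, msix_ent, rc))
 *                      return false;   // even the reduced set failed
 *              wanted = rc;            // run with fewer vectors
 *      }
 *
 * Vector 0 is reserved for link and setup events, so RSS gets
 * irq_cnt - 1 RX queues, and on 5719/5720 TSS gets the same number of
 * TX queues.
 */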
9871
9872 static void tg3_ints_init(struct tg3 *tp)
9873 {
9874         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9875             !tg3_flag(tp, TAGGED_STATUS)) {
9876                 /* All MSI-supporting chips should support tagged
9877                  * status.  Warn and fall back if this one does not.
9878                  */
9879                 netdev_warn(tp->dev,
9880                             "MSI without TAGGED_STATUS? Not using MSI\n");
9881                 goto defcfg;
9882         }
9883
9884         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9885                 tg3_flag_set(tp, USING_MSIX);
9886         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9887                 tg3_flag_set(tp, USING_MSI);
9888
9889         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9890                 u32 msi_mode = tr32(MSGINT_MODE);
9891                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9892                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9893                 if (!tg3_flag(tp, 1SHOT_MSI))
9894                         msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
9895                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9896         }
9897 defcfg:
9898         if (!tg3_flag(tp, USING_MSIX)) {
9899                 tp->irq_cnt = 1;
9900                 tp->napi[0].irq_vec = tp->pdev->irq;
9901                 netif_set_real_num_tx_queues(tp->dev, 1);
9902                 netif_set_real_num_rx_queues(tp->dev, 1);
9903         }
9904 }
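
/* tg3_ints_init() picks the best interrupt mode the chip and the platform
 * will grant, in strict preference order:
 *
 *      MSI-X (multivector, RSS-capable)
 *        -> MSI (single vector)
 *           -> legacy INTx (the defcfg path)
 *
 * MSGINT_MODE is then programmed to match: MULTIVEC_EN only for
 * multivector MSI-X, and ONE_SHOT_DISABLE whenever the chip lacks
 * one-shot MSI support.
 */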
9905
9906 static void tg3_ints_fini(struct tg3 *tp)
9907 {
9908         if (tg3_flag(tp, USING_MSIX))
9909                 pci_disable_msix(tp->pdev);
9910         else if (tg3_flag(tp, USING_MSI))
9911                 pci_disable_msi(tp->pdev);
9912         tg3_flag_clear(tp, USING_MSI);
9913         tg3_flag_clear(tp, USING_MSIX);
9914         tg3_flag_clear(tp, ENABLE_RSS);
9915         tg3_flag_clear(tp, ENABLE_TSS);
9916 }
9917
9918 static int tg3_open(struct net_device *dev)
9919 {
9920         struct tg3 *tp = netdev_priv(dev);
9921         int i, err;
9922
9923         if (tp->fw_needed) {
9924                 err = tg3_request_firmware(tp);
9925                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9926                         if (err)
9927                                 return err;
9928                 } else if (err) {
9929                         netdev_warn(tp->dev, "TSO capability disabled\n");
9930                         tg3_flag_clear(tp, TSO_CAPABLE);
9931                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9932                         netdev_notice(tp->dev, "TSO capability restored\n");
9933                         tg3_flag_set(tp, TSO_CAPABLE);
9934                 }
9935         }
9936
9937         netif_carrier_off(tp->dev);
9938
9939         err = tg3_power_up(tp);
9940         if (err)
9941                 return err;
9942
9943         tg3_full_lock(tp, 0);
9944
9945         tg3_disable_ints(tp);
9946         tg3_flag_clear(tp, INIT_COMPLETE);
9947
9948         tg3_full_unlock(tp);
9949
9950         /*
9951          * Set up interrupts first so we know how
9952          * many NAPI resources to allocate
9953          */
9954         tg3_ints_init(tp);
9955
9956         tg3_rss_check_indir_tbl(tp);
9957
9958         /* The placement of this call is tied
9959          * to the setup and use of Host TX descriptors.
9960          */
9961         err = tg3_alloc_consistent(tp);
9962         if (err)
9963                 goto err_out1;
9964
9965         tg3_napi_init(tp);
9966
9967         tg3_napi_enable(tp);
9968
9969         for (i = 0; i < tp->irq_cnt; i++) {
9970                 struct tg3_napi *tnapi = &tp->napi[i];
9971                 err = tg3_request_irq(tp, i);
9972                 if (err) {
9973                         for (i--; i >= 0; i--) {
9974                                 tnapi = &tp->napi[i];
9975                                 free_irq(tnapi->irq_vec, tnapi);
9976                         }
9977                         goto err_out2;
9978                 }
9979         }
9980
9981         tg3_full_lock(tp, 0);
9982
9983         err = tg3_init_hw(tp, 1);
9984         if (err) {
9985                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9986                 tg3_free_rings(tp);
9987         } else {
9988                 if (tg3_flag(tp, TAGGED_STATUS) &&
9989                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9990                     !tg3_flag(tp, 57765_CLASS))
9991                         tp->timer_offset = HZ;
9992                 else
9993                         tp->timer_offset = HZ / 10;
9994
9995                 BUG_ON(tp->timer_offset > HZ);
9996                 tp->timer_counter = tp->timer_multiplier =
9997                         (HZ / tp->timer_offset);
9998                 tp->asf_counter = tp->asf_multiplier =
9999                         ((HZ / tp->timer_offset) * 2);
10000
10001                 init_timer(&tp->timer);
10002                 tp->timer.expires = jiffies + tp->timer_offset;
10003                 tp->timer.data = (unsigned long) tp;
10004                 tp->timer.function = tg3_timer;
10005         }
10006
10007         tg3_full_unlock(tp);
10008
10009         if (err)
10010                 goto err_out3;
10011
10012         if (tg3_flag(tp, USING_MSI)) {
10013                 err = tg3_test_msi(tp);
10014
10015                 if (err) {
10016                         tg3_full_lock(tp, 0);
10017                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10018                         tg3_free_rings(tp);
10019                         tg3_full_unlock(tp);
10020
10021                         goto err_out2;
10022                 }
10023
10024                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
10025                         u32 val = tr32(PCIE_TRANSACTION_CFG);
10026
10027                         tw32(PCIE_TRANSACTION_CFG,
10028                              val | PCIE_TRANS_CFG_1SHOT_MSI);
10029                 }
10030         }
10031
10032         tg3_phy_start(tp);
10033
10034         tg3_full_lock(tp, 0);
10035
10036         add_timer(&tp->timer);
10037         tg3_flag_set(tp, INIT_COMPLETE);
10038         tg3_enable_ints(tp);
10039
10040         tg3_full_unlock(tp);
10041
10042         netif_tx_start_all_queues(dev);
10043
10044         /*
10045          * Reset the loopback feature if it was turned on while the device
10046          * was down, to make sure it is configured properly now.
10047          */
10048         if (dev->features & NETIF_F_LOOPBACK)
10049                 tg3_set_loopback(dev, dev->features);
10050
10051         return 0;
10052
10053 err_out3:
10054         for (i = tp->irq_cnt - 1; i >= 0; i--) {
10055                 struct tg3_napi *tnapi = &tp->napi[i];
10056                 free_irq(tnapi->irq_vec, tnapi);
10057         }
10058
10059 err_out2:
10060         tg3_napi_disable(tp);
10061         tg3_napi_fini(tp);
10062         tg3_free_consistent(tp);
10063
10064 err_out1:
10065         tg3_ints_fini(tp);
10066         tg3_frob_aux_power(tp, false);
10067         pci_set_power_state(tp->pdev, PCI_D3hot);
10068         return err;
10069 }
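
/* A worked example of the timer arithmetic in tg3_open(), assuming
 * HZ = 1000 on a chip with tagged status (illustrative values only):
 *
 *      timer_offset  = HZ = 1000 jiffies       -> tg3_timer runs once/sec
 *      timer_counter = HZ / timer_offset = 1
 *      asf_counter   = (HZ / timer_offset) * 2 = 2 -> ALIVE3 every 2 ticks
 *
 * Without tagged status, timer_offset = HZ / 10, so the timer runs ten
 * times a second and the counters scale accordingly; either way the ASF
 * heartbeat fires every two seconds.  BUG_ON() guards the
 * timer_offset <= HZ invariant that the divisions rely on.
 */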
10070
10071 static int tg3_close(struct net_device *dev)
10072 {
10073         int i;
10074         struct tg3 *tp = netdev_priv(dev);
10075
10076         tg3_napi_disable(tp);
10077         tg3_reset_task_cancel(tp);
10078
10079         netif_tx_stop_all_queues(dev);
10080
10081         del_timer_sync(&tp->timer);
10082
10083         tg3_phy_stop(tp);
10084
10085         tg3_full_lock(tp, 1);
10086
10087         tg3_disable_ints(tp);
10088
10089         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10090         tg3_free_rings(tp);
10091         tg3_flag_clear(tp, INIT_COMPLETE);
10092
10093         tg3_full_unlock(tp);
10094
10095         for (i = tp->irq_cnt - 1; i >= 0; i--) {
10096                 struct tg3_napi *tnapi = &tp->napi[i];
10097                 free_irq(tnapi->irq_vec, tnapi);
10098         }
10099
10100         tg3_ints_fini(tp);
10101
10102         /* Clear stats across close / open calls */
10103         memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
10104         memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
10105
10106         tg3_napi_fini(tp);
10107
10108         tg3_free_consistent(tp);
10109
10110         tg3_power_down(tp);
10111
10112         netif_carrier_off(tp->dev);
10113
10114         return 0;
10115 }
10116
10117 static inline u64 get_stat64(tg3_stat64_t *val)
10118 {
10119        return ((u64)val->high << 32) | ((u64)val->low);
10120 }
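
/* The hardware keeps each statistic as a split 64-bit counter;
 * get_stat64() just reassembles it.  E.g. high = 0x1, low = 0x2 yields
 * 0x100000002:
 *
 *      u64 v = ((u64)val->high << 32) | (u64)val->low;
 */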
10121
10122 static u64 calc_crc_errors(struct tg3 *tp)
10123 {
10124         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10125
10126         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10127             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10128              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
10129                 u32 val;
10130
10131                 spin_lock_bh(&tp->lock);
10132                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
10133                         tg3_writephy(tp, MII_TG3_TEST1,
10134                                      val | MII_TG3_TEST1_CRC_EN);
10135                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
10136                 } else
10137                         val = 0;
10138                 spin_unlock_bh(&tp->lock);
10139
10140                 tp->phy_crc_errors += val;
10141
10142                 return tp->phy_crc_errors;
10143         }
10144
10145         return get_stat64(&hw_stats->rx_fcs_errors);
10146 }
10147
10148 #define ESTAT_ADD(member) \
10149         estats->member =        old_estats->member + \
10150                                 get_stat64(&hw_stats->member)
10151
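/* ESTAT_ADD() substitutes the member name in three places, so e.g.
 * ESTAT_ADD(rx_octets) expands to:
 *
 *      estats->rx_octets = old_estats->rx_octets +
 *                          get_stat64(&hw_stats->rx_octets);
 *
 * old_estats (tp->estats_prev) holds the totals accumulated before the
 * last chip reset, which is how the reported numbers survive close/open
 * cycles.
 */
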
10152 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp,
10153                                                struct tg3_ethtool_stats *estats)
10154 {
10155         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
10156         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10157
10158         if (!hw_stats)
10159                 return old_estats;
10160
10161         ESTAT_ADD(rx_octets);
10162         ESTAT_ADD(rx_fragments);
10163         ESTAT_ADD(rx_ucast_packets);
10164         ESTAT_ADD(rx_mcast_packets);
10165         ESTAT_ADD(rx_bcast_packets);
10166         ESTAT_ADD(rx_fcs_errors);
10167         ESTAT_ADD(rx_align_errors);
10168         ESTAT_ADD(rx_xon_pause_rcvd);
10169         ESTAT_ADD(rx_xoff_pause_rcvd);
10170         ESTAT_ADD(rx_mac_ctrl_rcvd);
10171         ESTAT_ADD(rx_xoff_entered);
10172         ESTAT_ADD(rx_frame_too_long_errors);
10173         ESTAT_ADD(rx_jabbers);
10174         ESTAT_ADD(rx_undersize_packets);
10175         ESTAT_ADD(rx_in_length_errors);
10176         ESTAT_ADD(rx_out_length_errors);
10177         ESTAT_ADD(rx_64_or_less_octet_packets);
10178         ESTAT_ADD(rx_65_to_127_octet_packets);
10179         ESTAT_ADD(rx_128_to_255_octet_packets);
10180         ESTAT_ADD(rx_256_to_511_octet_packets);
10181         ESTAT_ADD(rx_512_to_1023_octet_packets);
10182         ESTAT_ADD(rx_1024_to_1522_octet_packets);
10183         ESTAT_ADD(rx_1523_to_2047_octet_packets);
10184         ESTAT_ADD(rx_2048_to_4095_octet_packets);
10185         ESTAT_ADD(rx_4096_to_8191_octet_packets);
10186         ESTAT_ADD(rx_8192_to_9022_octet_packets);
10187
10188         ESTAT_ADD(tx_octets);
10189         ESTAT_ADD(tx_collisions);
10190         ESTAT_ADD(tx_xon_sent);
10191         ESTAT_ADD(tx_xoff_sent);
10192         ESTAT_ADD(tx_flow_control);
10193         ESTAT_ADD(tx_mac_errors);
10194         ESTAT_ADD(tx_single_collisions);
10195         ESTAT_ADD(tx_mult_collisions);
10196         ESTAT_ADD(tx_deferred);
10197         ESTAT_ADD(tx_excessive_collisions);
10198         ESTAT_ADD(tx_late_collisions);
10199         ESTAT_ADD(tx_collide_2times);
10200         ESTAT_ADD(tx_collide_3times);
10201         ESTAT_ADD(tx_collide_4times);
10202         ESTAT_ADD(tx_collide_5times);
10203         ESTAT_ADD(tx_collide_6times);
10204         ESTAT_ADD(tx_collide_7times);
10205         ESTAT_ADD(tx_collide_8times);
10206         ESTAT_ADD(tx_collide_9times);
10207         ESTAT_ADD(tx_collide_10times);
10208         ESTAT_ADD(tx_collide_11times);
10209         ESTAT_ADD(tx_collide_12times);
10210         ESTAT_ADD(tx_collide_13times);
10211         ESTAT_ADD(tx_collide_14times);
10212         ESTAT_ADD(tx_collide_15times);
10213         ESTAT_ADD(tx_ucast_packets);
10214         ESTAT_ADD(tx_mcast_packets);
10215         ESTAT_ADD(tx_bcast_packets);
10216         ESTAT_ADD(tx_carrier_sense_errors);
10217         ESTAT_ADD(tx_discards);
10218         ESTAT_ADD(tx_errors);
10219
10220         ESTAT_ADD(dma_writeq_full);
10221         ESTAT_ADD(dma_write_prioq_full);
10222         ESTAT_ADD(rxbds_empty);
10223         ESTAT_ADD(rx_discards);
10224         ESTAT_ADD(rx_errors);
10225         ESTAT_ADD(rx_threshold_hit);
10226
10227         ESTAT_ADD(dma_readq_full);
10228         ESTAT_ADD(dma_read_prioq_full);
10229         ESTAT_ADD(tx_comp_queue_full);
10230
10231         ESTAT_ADD(ring_set_send_prod_index);
10232         ESTAT_ADD(ring_status_update);
10233         ESTAT_ADD(nic_irqs);
10234         ESTAT_ADD(nic_avoided_irqs);
10235         ESTAT_ADD(nic_tx_threshold_hit);
10236
10237         ESTAT_ADD(mbuf_lwm_thresh_hit);
10238
10239         return estats;
10240 }
10241
10242 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
10243                                                  struct rtnl_link_stats64 *stats)
10244 {
10245         struct tg3 *tp = netdev_priv(dev);
10246         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
10247         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10248
10249         if (!hw_stats)
10250                 return old_stats;
10251
10252         stats->rx_packets = old_stats->rx_packets +
10253                 get_stat64(&hw_stats->rx_ucast_packets) +
10254                 get_stat64(&hw_stats->rx_mcast_packets) +
10255                 get_stat64(&hw_stats->rx_bcast_packets);
10256
10257         stats->tx_packets = old_stats->tx_packets +
10258                 get_stat64(&hw_stats->tx_ucast_packets) +
10259                 get_stat64(&hw_stats->tx_mcast_packets) +
10260                 get_stat64(&hw_stats->tx_bcast_packets);
10261
10262         stats->rx_bytes = old_stats->rx_bytes +
10263                 get_stat64(&hw_stats->rx_octets);
10264         stats->tx_bytes = old_stats->tx_bytes +
10265                 get_stat64(&hw_stats->tx_octets);
10266
10267         stats->rx_errors = old_stats->rx_errors +
10268                 get_stat64(&hw_stats->rx_errors);
10269         stats->tx_errors = old_stats->tx_errors +
10270                 get_stat64(&hw_stats->tx_errors) +
10271                 get_stat64(&hw_stats->tx_mac_errors) +
10272                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
10273                 get_stat64(&hw_stats->tx_discards);
10274
10275         stats->multicast = old_stats->multicast +
10276                 get_stat64(&hw_stats->rx_mcast_packets);
10277         stats->collisions = old_stats->collisions +
10278                 get_stat64(&hw_stats->tx_collisions);
10279
10280         stats->rx_length_errors = old_stats->rx_length_errors +
10281                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
10282                 get_stat64(&hw_stats->rx_undersize_packets);
10283
10284         stats->rx_over_errors = old_stats->rx_over_errors +
10285                 get_stat64(&hw_stats->rxbds_empty);
10286         stats->rx_frame_errors = old_stats->rx_frame_errors +
10287                 get_stat64(&hw_stats->rx_align_errors);
10288         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
10289                 get_stat64(&hw_stats->tx_discards);
10290         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
10291                 get_stat64(&hw_stats->tx_carrier_sense_errors);
10292
10293         stats->rx_crc_errors = old_stats->rx_crc_errors +
10294                 calc_crc_errors(tp);
10295
10296         stats->rx_missed_errors = old_stats->rx_missed_errors +
10297                 get_stat64(&hw_stats->rx_discards);
10298
10299         stats->rx_dropped = tp->rx_dropped;
10300         stats->tx_dropped = tp->tx_dropped;
10301
10302         return stats;
10303 }
10304
10305 static int tg3_get_regs_len(struct net_device *dev)
10306 {
10307         return TG3_REG_BLK_SIZE;
10308 }
10309
10310 static void tg3_get_regs(struct net_device *dev,
10311                 struct ethtool_regs *regs, void *_p)
10312 {
10313         struct tg3 *tp = netdev_priv(dev);
10314
10315         regs->version = 0;
10316
10317         memset(_p, 0, TG3_REG_BLK_SIZE);
10318
10319         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10320                 return;
10321
10322         tg3_full_lock(tp, 0);
10323
10324         tg3_dump_legacy_regs(tp, (u32 *)_p);
10325
10326         tg3_full_unlock(tp);
10327 }
10328
10329 static int tg3_get_eeprom_len(struct net_device *dev)
10330 {
10331         struct tg3 *tp = netdev_priv(dev);
10332
10333         return tp->nvram_size;
10334 }
10335
10336 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10337 {
10338         struct tg3 *tp = netdev_priv(dev);
10339         int ret;
10340         u8  *pd;
10341         u32 i, offset, len, b_offset, b_count;
10342         __be32 val;
10343
10344         if (tg3_flag(tp, NO_NVRAM))
10345                 return -EINVAL;
10346
10347         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10348                 return -EAGAIN;
10349
10350         offset = eeprom->offset;
10351         len = eeprom->len;
10352         eeprom->len = 0;
10353
10354         eeprom->magic = TG3_EEPROM_MAGIC;
10355
10356         if (offset & 3) {
10357                 /* adjustments to start on required 4 byte boundary */
10358                 b_offset = offset & 3;
10359                 b_count = 4 - b_offset;
10360                 if (b_count > len) {
10361                         /* i.e. offset=1 len=2 */
10362                         b_count = len;
10363                 }
10364                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
10365                 if (ret)
10366                         return ret;
10367                 memcpy(data, ((char *)&val) + b_offset, b_count);
10368                 len -= b_count;
10369                 offset += b_count;
10370                 eeprom->len += b_count;
10371         }
10372
10373         /* read bytes up to the last 4 byte boundary */
10374         pd = &data[eeprom->len];
10375         for (i = 0; i < (len - (len & 3)); i += 4) {
10376                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
10377                 if (ret) {
10378                         eeprom->len += i;
10379                         return ret;
10380                 }
10381                 memcpy(pd + i, &val, 4);
10382         }
10383         eeprom->len += i;
10384
10385         if (len & 3) {
10386                 /* read last bytes not ending on 4 byte boundary */
10387                 pd = &data[eeprom->len];
10388                 b_count = len & 3;
10389                 b_offset = offset + len - b_count;
10390                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
10391                 if (ret)
10392                         return ret;
10393                 memcpy(pd, &val, b_count);
10394                 eeprom->len += b_count;
10395         }
10396         return 0;
10397 }
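
/* Worked example of the alignment handling above, for a hypothetical
 * request with offset = 1 and len = 6 (NVRAM is word-addressable only):
 *
 *      head: b_offset = 1, b_count = 3  -> read word 0, copy bytes 1..3
 *      body: offset = 4, len = 3        -> no full words remain
 *      tail: len & 3 = 3                -> read word 4, copy bytes 4..6
 *
 * Every unaligned request decomposes this way into a head fragment,
 * aligned middle words, and a tail fragment.
 */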
10398
10399 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10400 {
10401         struct tg3 *tp = netdev_priv(dev);
10402         int ret;
10403         u32 offset, len, b_offset, odd_len;
10404         u8 *buf;
10405         __be32 start, end;
10406
10407         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10408                 return -EAGAIN;
10409
10410         if (tg3_flag(tp, NO_NVRAM) ||
10411             eeprom->magic != TG3_EEPROM_MAGIC)
10412                 return -EINVAL;
10413
10414         offset = eeprom->offset;
10415         len = eeprom->len;
10416
10417         if ((b_offset = (offset & 3))) {
10418                 /* adjustments to start on required 4 byte boundary */
10419                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10420                 if (ret)
10421                         return ret;
10422                 len += b_offset;
10423                 offset &= ~3;
10424                 if (len < 4)
10425                         len = 4;
10426         }
10427
10428         odd_len = 0;
10429         if (len & 3) {
10430                 /* adjustments to end on required 4 byte boundary */
10431                 odd_len = 1;
10432                 len = (len + 3) & ~3;
10433                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10434                 if (ret)
10435                         return ret;
10436         }
10437
10438         buf = data;
10439         if (b_offset || odd_len) {
10440                 buf = kmalloc(len, GFP_KERNEL);
10441                 if (!buf)
10442                         return -ENOMEM;
10443                 if (b_offset)
10444                         memcpy(buf, &start, 4);
10445                 if (odd_len)
10446                         memcpy(buf+len-4, &end, 4);
10447                 memcpy(buf + b_offset, data, eeprom->len);
10448         }
10449
10450         ret = tg3_nvram_write_block(tp, offset, len, buf);
10451
10452         if (buf != data)
10453                 kfree(buf);
10454
10455         return ret;
10456 }
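
/* tg3_set_eeprom() is a read-modify-write: NVRAM is programmed in whole
 * 4-byte words, so misaligned or odd-length writes are widened and the
 * missing bytes back-filled from the device.  For a hypothetical write at
 * offset = 2, len = 5:
 *
 *      head: read word 0 into 'start'  -> bytes 0..1 preserved
 *      span: offset = 0, len 7 -> rounded up to 8
 *      tail: read word 4 into 'end'    -> byte 7 preserved
 *
 * The bounce buffer is then start bytes + caller data + end byte, written
 * as two aligned words by tg3_nvram_write_block().
 */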
10457
10458 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10459 {
10460         struct tg3 *tp = netdev_priv(dev);
10461
10462         if (tg3_flag(tp, USE_PHYLIB)) {
10463                 struct phy_device *phydev;
10464                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10465                         return -EAGAIN;
10466                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10467                 return phy_ethtool_gset(phydev, cmd);
10468         }
10469
10470         cmd->supported = (SUPPORTED_Autoneg);
10471
10472         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10473                 cmd->supported |= (SUPPORTED_1000baseT_Half |
10474                                    SUPPORTED_1000baseT_Full);
10475
10476         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10477                 cmd->supported |= (SUPPORTED_100baseT_Half |
10478                                   SUPPORTED_100baseT_Full |
10479                                   SUPPORTED_10baseT_Half |
10480                                   SUPPORTED_10baseT_Full |
10481                                   SUPPORTED_TP);
10482                 cmd->port = PORT_TP;
10483         } else {
10484                 cmd->supported |= SUPPORTED_FIBRE;
10485                 cmd->port = PORT_FIBRE;
10486         }
10487
10488         cmd->advertising = tp->link_config.advertising;
10489         if (tg3_flag(tp, PAUSE_AUTONEG)) {
10490                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10491                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10492                                 cmd->advertising |= ADVERTISED_Pause;
10493                         } else {
10494                                 cmd->advertising |= ADVERTISED_Pause |
10495                                                     ADVERTISED_Asym_Pause;
10496                         }
10497                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10498                         cmd->advertising |= ADVERTISED_Asym_Pause;
10499                 }
10500         }
10501         if (netif_running(dev) && netif_carrier_ok(dev)) {
10502                 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10503                 cmd->duplex = tp->link_config.active_duplex;
10504                 cmd->lp_advertising = tp->link_config.rmt_adv;
10505                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10506                         if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
10507                                 cmd->eth_tp_mdix = ETH_TP_MDI_X;
10508                         else
10509                                 cmd->eth_tp_mdix = ETH_TP_MDI;
10510                 }
10511         } else {
10512                 ethtool_cmd_speed_set(cmd, SPEED_INVALID);
10513                 cmd->duplex = DUPLEX_INVALID;
10514                 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
10515         }
10516         cmd->phy_address = tp->phy_addr;
10517         cmd->transceiver = XCVR_INTERNAL;
10518         cmd->autoneg = tp->link_config.autoneg;
10519         cmd->maxtxpkt = 0;
10520         cmd->maxrxpkt = 0;
10521         return 0;
10522 }
10523
10524 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10525 {
10526         struct tg3 *tp = netdev_priv(dev);
10527         u32 speed = ethtool_cmd_speed(cmd);
10528
10529         if (tg3_flag(tp, USE_PHYLIB)) {
10530                 struct phy_device *phydev;
10531                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10532                         return -EAGAIN;
10533                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10534                 return phy_ethtool_sset(phydev, cmd);
10535         }
10536
10537         if (cmd->autoneg != AUTONEG_ENABLE &&
10538             cmd->autoneg != AUTONEG_DISABLE)
10539                 return -EINVAL;
10540
10541         if (cmd->autoneg == AUTONEG_DISABLE &&
10542             cmd->duplex != DUPLEX_FULL &&
10543             cmd->duplex != DUPLEX_HALF)
10544                 return -EINVAL;
10545
10546         if (cmd->autoneg == AUTONEG_ENABLE) {
10547                 u32 mask = ADVERTISED_Autoneg |
10548                            ADVERTISED_Pause |
10549                            ADVERTISED_Asym_Pause;
10550
10551                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10552                         mask |= ADVERTISED_1000baseT_Half |
10553                                 ADVERTISED_1000baseT_Full;
10554
10555                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10556                         mask |= ADVERTISED_100baseT_Half |
10557                                 ADVERTISED_100baseT_Full |
10558                                 ADVERTISED_10baseT_Half |
10559                                 ADVERTISED_10baseT_Full |
10560                                 ADVERTISED_TP;
10561                 else
10562                         mask |= ADVERTISED_FIBRE;
10563
10564                 if (cmd->advertising & ~mask)
10565                         return -EINVAL;
10566
10567                 mask &= (ADVERTISED_1000baseT_Half |
10568                          ADVERTISED_1000baseT_Full |
10569                          ADVERTISED_100baseT_Half |
10570                          ADVERTISED_100baseT_Full |
10571                          ADVERTISED_10baseT_Half |
10572                          ADVERTISED_10baseT_Full);
10573
10574                 cmd->advertising &= mask;
10575         } else {
10576                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10577                         if (speed != SPEED_1000)
10578                                 return -EINVAL;
10579
10580                         if (cmd->duplex != DUPLEX_FULL)
10581                                 return -EINVAL;
10582                 } else {
10583                         if (speed != SPEED_100 &&
10584                             speed != SPEED_10)
10585                                 return -EINVAL;
10586                 }
10587         }
10588
10589         tg3_full_lock(tp, 0);
10590
10591         tp->link_config.autoneg = cmd->autoneg;
10592         if (cmd->autoneg == AUTONEG_ENABLE) {
10593                 tp->link_config.advertising = (cmd->advertising |
10594                                               ADVERTISED_Autoneg);
10595                 tp->link_config.speed = SPEED_INVALID;
10596                 tp->link_config.duplex = DUPLEX_INVALID;
10597         } else {
10598                 tp->link_config.advertising = 0;
10599                 tp->link_config.speed = speed;
10600                 tp->link_config.duplex = cmd->duplex;
10601         }
10602
10603         tp->link_config.orig_speed = tp->link_config.speed;
10604         tp->link_config.orig_duplex = tp->link_config.duplex;
10605         tp->link_config.orig_autoneg = tp->link_config.autoneg;
10606
10607         if (netif_running(dev))
10608                 tg3_setup_phy(tp, 1);
10609
10610         tg3_full_unlock(tp);
10611
10612         return 0;
10613 }
10614
10615 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10616 {
10617         struct tg3 *tp = netdev_priv(dev);
10618
10619         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
10620         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
10621         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
10622         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
10623 }
10624
10625 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10626 {
10627         struct tg3 *tp = netdev_priv(dev);
10628
10629         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10630                 wol->supported = WAKE_MAGIC;
10631         else
10632                 wol->supported = 0;
10633         wol->wolopts = 0;
10634         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10635                 wol->wolopts = WAKE_MAGIC;
10636         memset(&wol->sopass, 0, sizeof(wol->sopass));
10637 }
10638
10639 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10640 {
10641         struct tg3 *tp = netdev_priv(dev);
10642         struct device *dp = &tp->pdev->dev;
10643
10644         if (wol->wolopts & ~WAKE_MAGIC)
10645                 return -EINVAL;
10646         if ((wol->wolopts & WAKE_MAGIC) &&
10647             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10648                 return -EINVAL;
10649
10650         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10651
10652         spin_lock_bh(&tp->lock);
10653         if (device_may_wakeup(dp))
10654                 tg3_flag_set(tp, WOL_ENABLE);
10655         else
10656                 tg3_flag_clear(tp, WOL_ENABLE);
10657         spin_unlock_bh(&tp->lock);
10658
10659         return 0;
10660 }
10661
10662 static u32 tg3_get_msglevel(struct net_device *dev)
10663 {
10664         struct tg3 *tp = netdev_priv(dev);
10665         return tp->msg_enable;
10666 }
10667
10668 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10669 {
10670         struct tg3 *tp = netdev_priv(dev);
10671         tp->msg_enable = value;
10672 }
10673
10674 static int tg3_nway_reset(struct net_device *dev)
10675 {
10676         struct tg3 *tp = netdev_priv(dev);
10677         int r;
10678
10679         if (!netif_running(dev))
10680                 return -EAGAIN;
10681
10682         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10683                 return -EINVAL;
10684
10685         if (tg3_flag(tp, USE_PHYLIB)) {
10686                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10687                         return -EAGAIN;
10688                 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10689         } else {
10690                 u32 bmcr;
10691
10692                 spin_lock_bh(&tp->lock);
10693                 r = -EINVAL;
10694                 tg3_readphy(tp, MII_BMCR, &bmcr);
10695                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10696                     ((bmcr & BMCR_ANENABLE) ||
10697                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10698                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10699                                                    BMCR_ANENABLE);
10700                         r = 0;
10701                 }
10702                 spin_unlock_bh(&tp->lock);
10703         }
10704
10705         return r;
10706 }
10707
10708 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10709 {
10710         struct tg3 *tp = netdev_priv(dev);
10711
10712         ering->rx_max_pending = tp->rx_std_ring_mask;
10713         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10714                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10715         else
10716                 ering->rx_jumbo_max_pending = 0;
10717
10718         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10719
10720         ering->rx_pending = tp->rx_pending;
10721         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10722                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10723         else
10724                 ering->rx_jumbo_pending = 0;
10725
10726         ering->tx_pending = tp->napi[0].tx_pending;
10727 }
10728
10729 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10730 {
10731         struct tg3 *tp = netdev_priv(dev);
10732         int i, irq_sync = 0, err = 0;
10733
10734         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10735             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10736             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10737             (ering->tx_pending <= MAX_SKB_FRAGS) ||
10738             (tg3_flag(tp, TSO_BUG) &&
10739              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10740                 return -EINVAL;
10741
10742         if (netif_running(dev)) {
10743                 tg3_phy_stop(tp);
10744                 tg3_netif_stop(tp);
10745                 irq_sync = 1;
10746         }
10747
10748         tg3_full_lock(tp, irq_sync);
10749
10750         tp->rx_pending = ering->rx_pending;
10751
10752         if (tg3_flag(tp, MAX_RXPEND_64) &&
10753             tp->rx_pending > 63)
10754                 tp->rx_pending = 63;
10755         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10756
10757         for (i = 0; i < tp->irq_max; i++)
10758                 tp->napi[i].tx_pending = ering->tx_pending;
10759
10760         if (netif_running(dev)) {
10761                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10762                 err = tg3_restart_hw(tp, 1);
10763                 if (!err)
10764                         tg3_netif_start(tp);
10765         }
10766
10767         tg3_full_unlock(tp);
10768
10769         if (irq_sync && !err)
10770                 tg3_phy_start(tp);
10771
10772         return err;
10773 }
10774
10775 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10776 {
10777         struct tg3 *tp = netdev_priv(dev);
10778
10779         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10780
10781         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
10782                 epause->rx_pause = 1;
10783         else
10784                 epause->rx_pause = 0;
10785
10786         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
10787                 epause->tx_pause = 1;
10788         else
10789                 epause->tx_pause = 0;
10790 }
10791
10792 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10793 {
10794         struct tg3 *tp = netdev_priv(dev);
10795         int err = 0;
10796
10797         if (tg3_flag(tp, USE_PHYLIB)) {
10798                 u32 newadv;
10799                 struct phy_device *phydev;
10800
10801                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10802
10803                 if (!(phydev->supported & SUPPORTED_Pause) ||
10804                     (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10805                      (epause->rx_pause != epause->tx_pause)))
10806                         return -EINVAL;
10807
10808                 tp->link_config.flowctrl = 0;
10809                 if (epause->rx_pause) {
10810                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
10811
10812                         if (epause->tx_pause) {
10813                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10814                                 newadv = ADVERTISED_Pause;
10815                         } else
10816                                 newadv = ADVERTISED_Pause |
10817                                          ADVERTISED_Asym_Pause;
10818                 } else if (epause->tx_pause) {
10819                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
10820                         newadv = ADVERTISED_Asym_Pause;
10821                 } else
10822                         newadv = 0;
10823
10824                 if (epause->autoneg)
10825                         tg3_flag_set(tp, PAUSE_AUTONEG);
10826                 else
10827                         tg3_flag_clear(tp, PAUSE_AUTONEG);
10828
10829                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10830                         u32 oldadv = phydev->advertising &
10831                                      (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10832                         if (oldadv != newadv) {
10833                                 phydev->advertising &=
10834                                         ~(ADVERTISED_Pause |
10835                                           ADVERTISED_Asym_Pause);
10836                                 phydev->advertising |= newadv;
10837                                 if (phydev->autoneg) {
10838                                         /*
10839                                          * Always renegotiate the link to
10840                                          * inform our link partner of our
10841                                          * flow control settings, even if the
10842                                          * flow control is forced.  Let
10843                                          * tg3_adjust_link() do the final
10844                                          * flow control setup.
10845                                          */
10846                                         return phy_start_aneg(phydev);
10847                                 }
10848                         }
10849
10850                         if (!epause->autoneg)
10851                                 tg3_setup_flow_control(tp, 0, 0);
10852                 } else {
10853                         tp->link_config.orig_advertising &=
10854                                         ~(ADVERTISED_Pause |
10855                                           ADVERTISED_Asym_Pause);
10856                         tp->link_config.orig_advertising |= newadv;
10857                 }
10858         } else {
10859                 int irq_sync = 0;
10860
10861                 if (netif_running(dev)) {
10862                         tg3_netif_stop(tp);
10863                         irq_sync = 1;
10864                 }
10865
10866                 tg3_full_lock(tp, irq_sync);
10867
10868                 if (epause->autoneg)
10869                         tg3_flag_set(tp, PAUSE_AUTONEG);
10870                 else
10871                         tg3_flag_clear(tp, PAUSE_AUTONEG);
10872                 if (epause->rx_pause)
10873                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
10874                 else
10875                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10876                 if (epause->tx_pause)
10877                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
10878                 else
10879                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10880
10881                 if (netif_running(dev)) {
10882                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10883                         err = tg3_restart_hw(tp, 1);
10884                         if (!err)
10885                                 tg3_netif_start(tp);
10886                 }
10887
10888                 tg3_full_unlock(tp);
10889         }
10890
10891         return err;
10892 }
10893
10894 static int tg3_get_sset_count(struct net_device *dev, int sset)
10895 {
10896         switch (sset) {
10897         case ETH_SS_TEST:
10898                 return TG3_NUM_TEST;
10899         case ETH_SS_STATS:
10900                 return TG3_NUM_STATS;
10901         default:
10902                 return -EOPNOTSUPP;
10903         }
10904 }
10905
10906 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
10907                          u32 *rules __always_unused)
10908 {
10909         struct tg3 *tp = netdev_priv(dev);
10910
10911         if (!tg3_flag(tp, SUPPORT_MSIX))
10912                 return -EOPNOTSUPP;
10913
10914         switch (info->cmd) {
10915         case ETHTOOL_GRXRINGS:
10916                 if (netif_running(tp->dev))
10917                         info->data = tp->irq_cnt;
10918                 else {
10919                         info->data = num_online_cpus();
10920                         if (info->data > TG3_IRQ_MAX_VECS_RSS)
10921                                 info->data = TG3_IRQ_MAX_VECS_RSS;
10922                 }
10923
10924                 /* The first interrupt vector only
10925                  * handles link interrupts.
10926                  */
10927                 info->data -= 1;
10928                 return 0;
10929
10930         default:
10931                 return -EOPNOTSUPP;
10932         }
10933 }
10934
10935 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
10936 {
10937         u32 size = 0;
10938         struct tg3 *tp = netdev_priv(dev);
10939
10940         if (tg3_flag(tp, SUPPORT_MSIX))
10941                 size = TG3_RSS_INDIR_TBL_SIZE;
10942
10943         return size;
10944 }
10945
10946 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
10947 {
10948         struct tg3 *tp = netdev_priv(dev);
10949         int i;
10950
10951         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
10952                 indir[i] = tp->rss_ind_tbl[i];
10953
10954         return 0;
10955 }
10956
10957 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
10958 {
10959         struct tg3 *tp = netdev_priv(dev);
10960         size_t i;
10961
10962         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
10963                 tp->rss_ind_tbl[i] = indir[i];
10964
10965         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
10966                 return 0;
10967
10968         /* It is legal to write the indirection
10969          * table while the device is running.
10970          */
10971         tg3_full_lock(tp, 0);
10972         tg3_rss_write_indir_tbl(tp);
10973         tg3_full_unlock(tp);
10974
10975         return 0;
10976 }
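
/* This is the backend of "ethtool -X".  For example (hypothetical shell
 * usage):
 *
 *      ethtool -X eth0 equal 4         # spread RX over 4 queues
 *
 * The table can be rewritten live; tg3_rss_write_indir_tbl() is only
 * called under tg3_full_lock() when RSS is active, otherwise the new
 * table simply takes effect at the next open.
 */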
10977
10978 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10979 {
10980         switch (stringset) {
10981         case ETH_SS_STATS:
10982                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10983                 break;
10984         case ETH_SS_TEST:
10985                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10986                 break;
10987         default:
10988                 WARN_ON(1);     /* we need a WARN() */
10989                 break;
10990         }
10991 }
10992
10993 static int tg3_set_phys_id(struct net_device *dev,
10994                             enum ethtool_phys_id_state state)
10995 {
10996         struct tg3 *tp = netdev_priv(dev);
10997
10998         if (!netif_running(tp->dev))
10999                 return -EAGAIN;
11000
11001         switch (state) {
11002         case ETHTOOL_ID_ACTIVE:
11003                 return 1;       /* cycle on/off once per second */
11004
11005         case ETHTOOL_ID_ON:
11006                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11007                      LED_CTRL_1000MBPS_ON |
11008                      LED_CTRL_100MBPS_ON |
11009                      LED_CTRL_10MBPS_ON |
11010                      LED_CTRL_TRAFFIC_OVERRIDE |
11011                      LED_CTRL_TRAFFIC_BLINK |
11012                      LED_CTRL_TRAFFIC_LED);
11013                 break;
11014
11015         case ETHTOOL_ID_OFF:
11016                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11017                      LED_CTRL_TRAFFIC_OVERRIDE);
11018                 break;
11019
11020         case ETHTOOL_ID_INACTIVE:
11021                 tw32(MAC_LED_CTRL, tp->led_ctrl);
11022                 break;
11023         }
11024
11025         return 0;
11026 }
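
/* The ethtool set_phys_id contract, as relied on above: a positive return
 * from ETHTOOL_ID_ACTIVE asks the core to call back with ETHTOOL_ID_ON /
 * ETHTOOL_ID_OFF at roughly that many cycles per second for the requested
 * duration, finishing with ETHTOOL_ID_INACTIVE so the driver can restore
 * tp->led_ctrl.  Returning 1 here gives the once-per-second blink, e.g.:
 *
 *      ethtool -p eth0 10      # blink eth0's identification LED for 10s
 */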
11027
11028 static void tg3_get_ethtool_stats(struct net_device *dev,
11029                                    struct ethtool_stats *estats, u64 *tmp_stats)
11030 {
11031         struct tg3 *tp = netdev_priv(dev);
11032
11033         tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
11034 }
11035
11036 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
11037 {
11038         int i;
11039         __be32 *buf;
11040         u32 offset = 0, len = 0;
11041         u32 magic, val;
11042
11043         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
11044                 return NULL;
11045
11046         if (magic == TG3_EEPROM_MAGIC) {
11047                 for (offset = TG3_NVM_DIR_START;
11048                      offset < TG3_NVM_DIR_END;
11049                      offset += TG3_NVM_DIRENT_SIZE) {
11050                         if (tg3_nvram_read(tp, offset, &val))
11051                                 return NULL;
11052
11053                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
11054                             TG3_NVM_DIRTYPE_EXTVPD)
11055                                 break;
11056                 }
11057
11058                 if (offset != TG3_NVM_DIR_END) {
11059                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
11060                         if (tg3_nvram_read(tp, offset + 4, &offset))
11061                                 return NULL;
11062
11063                         offset = tg3_nvram_logical_addr(tp, offset);
11064                 }
11065         }
11066
11067         if (!offset || !len) {
11068                 offset = TG3_NVM_VPD_OFF;
11069                 len = TG3_NVM_VPD_LEN;
11070         }
11071
11072         buf = kmalloc(len, GFP_KERNEL);
11073         if (buf == NULL)
11074                 return NULL;
11075
11076         if (magic == TG3_EEPROM_MAGIC) {
11077                 for (i = 0; i < len; i += 4) {
11078                         /* The data is in little-endian format in NVRAM.
11079                          * Use the big-endian read routines to preserve
11080                          * the byte order as it exists in NVRAM.
11081                          */
11082                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
11083                                 goto error;
11084                 }
11085         } else {
11086                 u8 *ptr;
11087                 ssize_t cnt;
11088                 unsigned int pos = 0;
11089
11090                 ptr = (u8 *)&buf[0];
11091                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
11092                         cnt = pci_read_vpd(tp->pdev, pos,
11093                                            len - pos, ptr);
11094                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
11095                                 cnt = 0;
11096                         else if (cnt < 0)
11097                                 goto error;
11098                 }
11099                 if (pos != len)
11100                         goto error;
11101         }
11102
11103         *vpdlen = len;
11104
11105         return buf;
11106
11107 error:
11108         kfree(buf);
11109         return NULL;
11110 }
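
/* tg3_vpd_readblock() finds the VPD image either through the NVRAM
 * directory (a TG3_NVM_DIRTYPE_EXTVPD entry) or at the fixed legacy
 * offset; on parts without the EEPROM magic it reads through the PCI
 * core's VPD capability instead.  pci_read_vpd() may return short reads,
 * so that path retries up to three times, treating -ETIMEDOUT and -EINTR
 * as "read nothing, try again":
 *
 *      cnt = pci_read_vpd(tp->pdev, pos, len - pos, ptr);
 *      // other negative returns abort; otherwise pos += cnt, ptr += cnt
 *      // until pos == len or the attempts are exhausted
 */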
11111
11112 #define NVRAM_TEST_SIZE 0x100
11113 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
11114 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
11115 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
11116 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
11117 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
11118 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
11119 #define NVRAM_SELFBOOT_HW_SIZE 0x20
11120 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
11121
11122 static int tg3_test_nvram(struct tg3 *tp)
11123 {
11124         u32 csum, magic, len;
11125         __be32 *buf;
11126         int i, j, k, err = 0, size;
11127
11128         if (tg3_flag(tp, NO_NVRAM))
11129                 return 0;
11130
11131         if (tg3_nvram_read(tp, 0, &magic) != 0)
11132                 return -EIO;
11133
11134         if (magic == TG3_EEPROM_MAGIC)
11135                 size = NVRAM_TEST_SIZE;
11136         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
11137                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
11138                     TG3_EEPROM_SB_FORMAT_1) {
11139                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
11140                         case TG3_EEPROM_SB_REVISION_0:
11141                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
11142                                 break;
11143                         case TG3_EEPROM_SB_REVISION_2:
11144                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
11145                                 break;
11146                         case TG3_EEPROM_SB_REVISION_3:
11147                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
11148                                 break;
11149                         case TG3_EEPROM_SB_REVISION_4:
11150                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
11151                                 break;
11152                         case TG3_EEPROM_SB_REVISION_5:
11153                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
11154                                 break;
11155                         case TG3_EEPROM_SB_REVISION_6:
11156                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
11157                                 break;
11158                         default:
11159                                 return -EIO;
11160                         }
11161                 } else
11162                         return 0;
11163         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
11164                 size = NVRAM_SELFBOOT_HW_SIZE;
11165         else
11166                 return -EIO;
11167
11168         buf = kmalloc(size, GFP_KERNEL);
11169         if (buf == NULL)
11170                 return -ENOMEM;
11171
11172         err = -EIO;
11173         for (i = 0, j = 0; i < size; i += 4, j++) {
11174                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
11175                 if (err)
11176                         break;
11177         }
11178         if (i < size)
11179                 goto out;
11180
11181         /* Selfboot format */
11182         magic = be32_to_cpu(buf[0]);
11183         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
11184             TG3_EEPROM_MAGIC_FW) {
11185                 u8 *buf8 = (u8 *) buf, csum8 = 0;
11186
11187                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
11188                     TG3_EEPROM_SB_REVISION_2) {
11189                         /* For rev 2, the csum doesn't include the MBA. */
11190                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
11191                                 csum8 += buf8[i];
11192                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
11193                                 csum8 += buf8[i];
11194                 } else {
11195                         for (i = 0; i < size; i++)
11196                                 csum8 += buf8[i];
11197                 }
11198
11199                 if (csum8 == 0) {
11200                         err = 0;
11201                         goto out;
11202                 }
11203
11204                 err = -EIO;
11205                 goto out;
11206         }
11207
11208         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
11209             TG3_EEPROM_MAGIC_HW) {
11210                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
11211                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
11212                 u8 *buf8 = (u8 *) buf;
11213
11214                 /* Separate the parity bits and the data bytes.  */
11215                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
11216                         if ((i == 0) || (i == 8)) {
11217                                 int l;
11218                                 u8 msk;
11219
11220                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
11221                                         parity[k++] = buf8[i] & msk;
11222                                 i++;
11223                         } else if (i == 16) {
11224                                 int l;
11225                                 u8 msk;
11226
11227                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
11228                                         parity[k++] = buf8[i] & msk;
11229                                 i++;
11230
11231                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
11232                                         parity[k++] = buf8[i] & msk;
11233                                 i++;
11234                         }
11235                         data[j++] = buf8[i];
11236                 }
11237
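                /* Bytes 0 and 8 each supply 7 parity bits, byte 16
                 * supplies 6 and byte 17 supplies 8, for a total of 28
                 * parity bits covering the 28 remaining data bytes.
                 * Each data byte together with its parity bit must
                 * contain an odd number of set bits (odd parity),
                 * which is what the loop below verifies.
                 */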
11238                 err = -EIO;
11239                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
11240                         u8 hw8 = hweight8(data[i]);
11241
11242                         if ((hw8 & 0x1) && parity[i])
11243                                 goto out;
11244                         else if (!(hw8 & 0x1) && !parity[i])
11245                                 goto out;
11246                 }
11247                 err = 0;
11248                 goto out;
11249         }
11250
11251         err = -EIO;
11252
11253         /* Bootstrap checksum at offset 0x10 */
11254         csum = calc_crc((unsigned char *) buf, 0x10);
11255         if (csum != le32_to_cpu(buf[0x10/4]))
11256                 goto out;
11257
11258         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
11259         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
11260         if (csum != le32_to_cpu(buf[0xfc/4]))
11261                 goto out;
11262
11263         kfree(buf);
11264
11265         buf = tg3_vpd_readblock(tp, &len);
11266         if (!buf)
11267                 return -ENOMEM;
11268
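        /* Validate the VPD checksum: locate the read-only LRDT, find
         * the RV keyword inside it, and verify that all VPD bytes from
         * offset 0 up to and including the checksum byte sum to zero
         * modulo 256, as the PCI VPD format requires.
         */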
11269         i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
11270         if (i > 0) {
11271                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
11272                 if (j < 0)
11273                         goto out;
11274
11275                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
11276                         goto out;
11277
11278                 i += PCI_VPD_LRDT_TAG_SIZE;
11279                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
11280                                               PCI_VPD_RO_KEYWORD_CHKSUM);
11281                 if (j > 0) {
11282                         u8 csum8 = 0;
11283
11284                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
11285
11286                         for (i = 0; i <= j; i++)
11287                                 csum8 += ((u8 *)buf)[i];
11288
11289                         if (csum8)
11290                                 goto out;
11291                 }
11292         }
11293
11294         err = 0;
11295
11296 out:
11297         kfree(buf);
11298         return err;
11299 }
11300
11301 #define TG3_SERDES_TIMEOUT_SEC  2
11302 #define TG3_COPPER_TIMEOUT_SEC  6
11303
11304 static int tg3_test_link(struct tg3 *tp)
11305 {
11306         int i, max;
11307
11308         if (!netif_running(tp->dev))
11309                 return -ENODEV;
11310
11311         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
11312                 max = TG3_SERDES_TIMEOUT_SEC;
11313         else
11314                 max = TG3_COPPER_TIMEOUT_SEC;
11315
11316         for (i = 0; i < max; i++) {
11317                 if (netif_carrier_ok(tp->dev))
11318                         return 0;
11319
11320                 if (msleep_interruptible(1000))
11321                         break;
11322         }
11323
11324         return -EIO;
11325 }
11326
11327 /* Only test the commonly used registers */
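/* For each table entry below, read_mask selects bits that must keep
 * their original value across writes (read-only), while write_mask
 * selects bits that must faithfully store both the zeros and the ones
 * written to them (read/write).
 */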
11328 static int tg3_test_registers(struct tg3 *tp)
11329 {
11330         int i, is_5705, is_5750;
11331         u32 offset, read_mask, write_mask, val, save_val, read_val;
11332         static struct {
11333                 u16 offset;
11334                 u16 flags;
11335 #define TG3_FL_5705     0x1
11336 #define TG3_FL_NOT_5705 0x2
11337 #define TG3_FL_NOT_5788 0x4
11338 #define TG3_FL_NOT_5750 0x8
11339                 u32 read_mask;
11340                 u32 write_mask;
11341         } reg_tbl[] = {
11342                 /* MAC Control Registers */
11343                 { MAC_MODE, TG3_FL_NOT_5705,
11344                         0x00000000, 0x00ef6f8c },
11345                 { MAC_MODE, TG3_FL_5705,
11346                         0x00000000, 0x01ef6b8c },
11347                 { MAC_STATUS, TG3_FL_NOT_5705,
11348                         0x03800107, 0x00000000 },
11349                 { MAC_STATUS, TG3_FL_5705,
11350                         0x03800100, 0x00000000 },
11351                 { MAC_ADDR_0_HIGH, 0x0000,
11352                         0x00000000, 0x0000ffff },
11353                 { MAC_ADDR_0_LOW, 0x0000,
11354                         0x00000000, 0xffffffff },
11355                 { MAC_RX_MTU_SIZE, 0x0000,
11356                         0x00000000, 0x0000ffff },
11357                 { MAC_TX_MODE, 0x0000,
11358                         0x00000000, 0x00000070 },
11359                 { MAC_TX_LENGTHS, 0x0000,
11360                         0x00000000, 0x00003fff },
11361                 { MAC_RX_MODE, TG3_FL_NOT_5705,
11362                         0x00000000, 0x000007fc },
11363                 { MAC_RX_MODE, TG3_FL_5705,
11364                         0x00000000, 0x000007dc },
11365                 { MAC_HASH_REG_0, 0x0000,
11366                         0x00000000, 0xffffffff },
11367                 { MAC_HASH_REG_1, 0x0000,
11368                         0x00000000, 0xffffffff },
11369                 { MAC_HASH_REG_2, 0x0000,
11370                         0x00000000, 0xffffffff },
11371                 { MAC_HASH_REG_3, 0x0000,
11372                         0x00000000, 0xffffffff },
11373
11374                 /* Receive Data and Receive BD Initiator Control Registers. */
11375                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
11376                         0x00000000, 0xffffffff },
11377                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
11378                         0x00000000, 0xffffffff },
11379                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
11380                         0x00000000, 0x00000003 },
11381                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
11382                         0x00000000, 0xffffffff },
11383                 { RCVDBDI_STD_BD+0, 0x0000,
11384                         0x00000000, 0xffffffff },
11385                 { RCVDBDI_STD_BD+4, 0x0000,
11386                         0x00000000, 0xffffffff },
11387                 { RCVDBDI_STD_BD+8, 0x0000,
11388                         0x00000000, 0xffff0002 },
11389                 { RCVDBDI_STD_BD+0xc, 0x0000,
11390                         0x00000000, 0xffffffff },
11391
11392                 /* Receive BD Initiator Control Registers. */
11393                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
11394                         0x00000000, 0xffffffff },
11395                 { RCVBDI_STD_THRESH, TG3_FL_5705,
11396                         0x00000000, 0x000003ff },
11397                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
11398                         0x00000000, 0xffffffff },
11399
11400                 /* Host Coalescing Control Registers. */
11401                 { HOSTCC_MODE, TG3_FL_NOT_5705,
11402                         0x00000000, 0x00000004 },
11403                 { HOSTCC_MODE, TG3_FL_5705,
11404                         0x00000000, 0x000000f6 },
11405                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
11406                         0x00000000, 0xffffffff },
11407                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
11408                         0x00000000, 0x000003ff },
11409                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
11410                         0x00000000, 0xffffffff },
11411                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
11412                         0x00000000, 0x000003ff },
11413                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
11414                         0x00000000, 0xffffffff },
11415                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11416                         0x00000000, 0x000000ff },
11417                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
11418                         0x00000000, 0xffffffff },
11419                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11420                         0x00000000, 0x000000ff },
11421                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
11422                         0x00000000, 0xffffffff },
11423                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
11424                         0x00000000, 0xffffffff },
11425                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11426                         0x00000000, 0xffffffff },
11427                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11428                         0x00000000, 0x000000ff },
11429                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11430                         0x00000000, 0xffffffff },
11431                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11432                         0x00000000, 0x000000ff },
11433                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
11434                         0x00000000, 0xffffffff },
11435                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
11436                         0x00000000, 0xffffffff },
11437                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
11438                         0x00000000, 0xffffffff },
11439                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
11440                         0x00000000, 0xffffffff },
11441                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
11442                         0x00000000, 0xffffffff },
11443                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
11444                         0xffffffff, 0x00000000 },
11445                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
11446                         0xffffffff, 0x00000000 },
11447
11448                 /* Buffer Manager Control Registers. */
11449                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
11450                         0x00000000, 0x007fff80 },
11451                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
11452                         0x00000000, 0x007fffff },
11453                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
11454                         0x00000000, 0x0000003f },
11455                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
11456                         0x00000000, 0x000001ff },
11457                 { BUFMGR_MB_HIGH_WATER, 0x0000,
11458                         0x00000000, 0x000001ff },
11459                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
11460                         0xffffffff, 0x00000000 },
11461                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
11462                         0xffffffff, 0x00000000 },
11463
11464                 /* Mailbox Registers */
11465                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
11466                         0x00000000, 0x000001ff },
11467                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
11468                         0x00000000, 0x000001ff },
11469                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
11470                         0x00000000, 0x000007ff },
11471                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
11472                         0x00000000, 0x000001ff },
11473
11474                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
11475         };
11476
11477         is_5705 = is_5750 = 0;
11478         if (tg3_flag(tp, 5705_PLUS)) {
11479                 is_5705 = 1;
11480                 if (tg3_flag(tp, 5750_PLUS))
11481                         is_5750 = 1;
11482         }
11483
11484         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11485                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11486                         continue;
11487
11488                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11489                         continue;
11490
11491                 if (tg3_flag(tp, IS_5788) &&
11492                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
11493                         continue;
11494
11495                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11496                         continue;
11497
11498                 offset = (u32) reg_tbl[i].offset;
11499                 read_mask = reg_tbl[i].read_mask;
11500                 write_mask = reg_tbl[i].write_mask;
11501
11502                 /* Save the original register content */
11503                 save_val = tr32(offset);
11504
11505                 /* Determine the read-only value. */
11506                 read_val = save_val & read_mask;
11507
11508                 /* Write zero to the register, then make sure the read-only bits
11509                  * are not changed and the read/write bits are all zeros.
11510                  */
11511                 tw32(offset, 0);
11512
11513                 val = tr32(offset);
11514
11515                 /* Test the read-only and read/write bits. */
11516                 if (((val & read_mask) != read_val) || (val & write_mask))
11517                         goto out;
11518
11519                 /* Write ones to all the bits defined by RdMask and WrMask, then
11520                  * make sure the read-only bits are not changed and the
11521                  * read/write bits are all ones.
11522                  */
11523                 tw32(offset, read_mask | write_mask);
11524
11525                 val = tr32(offset);
11526
11527                 /* Test the read-only bits. */
11528                 if ((val & read_mask) != read_val)
11529                         goto out;
11530
11531                 /* Test the read/write bits. */
11532                 if ((val & write_mask) != write_mask)
11533                         goto out;
11534
11535                 tw32(offset, save_val);
11536         }
11537
11538         return 0;
11539
11540 out:
11541         if (netif_msg_hw(tp))
11542                 netdev_err(tp->dev,
11543                            "Register test failed at offset %x\n", offset);
11544         tw32(offset, save_val);
11545         return -EIO;
11546 }
11547
11548 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11549 {
11550         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11551         int i;
11552         u32 j;
11553
11554         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11555                 for (j = 0; j < len; j += 4) {
11556                         u32 val;
11557
11558                         tg3_write_mem(tp, offset + j, test_pattern[i]);
11559                         tg3_read_mem(tp, offset + j, &val);
11560                         if (val != test_pattern[i])
11561                                 return -EIO;
11562                 }
11563         }
11564         return 0;
11565 }
11566
11567 static int tg3_test_memory(struct tg3 *tp)
11568 {
11569         static struct mem_entry {
11570                 u32 offset;
11571                 u32 len;
11572         } mem_tbl_570x[] = {
11573                 { 0x00000000, 0x00b50},
11574                 { 0x00002000, 0x1c000},
11575                 { 0xffffffff, 0x00000}
11576         }, mem_tbl_5705[] = {
11577                 { 0x00000100, 0x0000c},
11578                 { 0x00000200, 0x00008},
11579                 { 0x00004000, 0x00800},
11580                 { 0x00006000, 0x01000},
11581                 { 0x00008000, 0x02000},
11582                 { 0x00010000, 0x0e000},
11583                 { 0xffffffff, 0x00000}
11584         }, mem_tbl_5755[] = {
11585                 { 0x00000200, 0x00008},
11586                 { 0x00004000, 0x00800},
11587                 { 0x00006000, 0x00800},
11588                 { 0x00008000, 0x02000},
11589                 { 0x00010000, 0x0c000},
11590                 { 0xffffffff, 0x00000}
11591         }, mem_tbl_5906[] = {
11592                 { 0x00000200, 0x00008},
11593                 { 0x00004000, 0x00400},
11594                 { 0x00006000, 0x00400},
11595                 { 0x00008000, 0x01000},
11596                 { 0x00010000, 0x01000},
11597                 { 0xffffffff, 0x00000}
11598         }, mem_tbl_5717[] = {
11599                 { 0x00000200, 0x00008},
11600                 { 0x00010000, 0x0a000},
11601                 { 0x00020000, 0x13c00},
11602                 { 0xffffffff, 0x00000}
11603         }, mem_tbl_57765[] = {
11604                 { 0x00000200, 0x00008},
11605                 { 0x00004000, 0x00800},
11606                 { 0x00006000, 0x09800},
11607                 { 0x00010000, 0x0a000},
11608                 { 0xffffffff, 0x00000}
11609         };
11610         struct mem_entry *mem_tbl;
11611         int err = 0;
11612         int i;
11613
11614         if (tg3_flag(tp, 5717_PLUS))
11615                 mem_tbl = mem_tbl_5717;
11616         else if (tg3_flag(tp, 57765_CLASS))
11617                 mem_tbl = mem_tbl_57765;
11618         else if (tg3_flag(tp, 5755_PLUS))
11619                 mem_tbl = mem_tbl_5755;
11620         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11621                 mem_tbl = mem_tbl_5906;
11622         else if (tg3_flag(tp, 5705_PLUS))
11623                 mem_tbl = mem_tbl_5705;
11624         else
11625                 mem_tbl = mem_tbl_570x;
11626
11627         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11628                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11629                 if (err)
11630                         break;
11631         }
11632
11633         return err;
11634 }
11635
11636 #define TG3_TSO_MSS             500
11637
11638 #define TG3_TSO_IP_HDR_LEN      20
11639 #define TG3_TSO_TCP_HDR_LEN     20
11640 #define TG3_TSO_TCP_OPT_LEN     12
11641
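/* Template TSO frame, starting at the EtherType; the two MAC
 * addresses are prepended at runtime:
 *   0x08 0x00           - EtherType IPv4
 *   20-byte IPv4 header - DF set, TTL 64, protocol TCP (6),
 *                         10.0.0.1 -> 10.0.0.2, tot_len/csum zeroed
 *   32-byte TCP header  - data offset 8 words (20 bytes plus 12
 *                         bytes of options: NOP, NOP, timestamp),
 *                         ACK flag set
 */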
11642 static const u8 tg3_tso_header[] = {
11643 0x08, 0x00,
11644 0x45, 0x00, 0x00, 0x00,
11645 0x00, 0x00, 0x40, 0x00,
11646 0x40, 0x06, 0x00, 0x00,
11647 0x0a, 0x00, 0x00, 0x01,
11648 0x0a, 0x00, 0x00, 0x02,
11649 0x0d, 0x00, 0xe0, 0x00,
11650 0x00, 0x00, 0x01, 0x00,
11651 0x00, 0x00, 0x02, 0x00,
11652 0x80, 0x10, 0x10, 0x00,
11653 0x14, 0x09, 0x00, 0x00,
11654 0x01, 0x01, 0x08, 0x0a,
11655 0x11, 0x11, 0x11, 0x11,
11656 0x11, 0x11, 0x11, 0x11,
11657 };
11658
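/* Send one frame (or one TSO burst) to ourselves over the currently
 * configured loopback path and verify it comes back intact: build
 * the packet, post it to the TX ring, kick the host coalescing
 * engine, poll the TX consumer and RX producer indices, then compare
 * the received payload byte for byte.
 */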
11659 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
11660 {
11661         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
11662         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11663         u32 budget;
11664         struct sk_buff *skb;
11665         u8 *tx_data, *rx_data;
11666         dma_addr_t map;
11667         int num_pkts, tx_len, rx_len, i, err;
11668         struct tg3_rx_buffer_desc *desc;
11669         struct tg3_napi *tnapi, *rnapi;
11670         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11671
11672         tnapi = &tp->napi[0];
11673         rnapi = &tp->napi[0];
11674         if (tp->irq_cnt > 1) {
11675                 if (tg3_flag(tp, ENABLE_RSS))
11676                         rnapi = &tp->napi[1];
11677                 if (tg3_flag(tp, ENABLE_TSS))
11678                         tnapi = &tp->napi[1];
11679         }
11680         coal_now = tnapi->coal_now | rnapi->coal_now;
11681
11682         err = -EIO;
11683
11684         tx_len = pktsz;
11685         skb = netdev_alloc_skb(tp->dev, tx_len);
11686         if (!skb)
11687                 return -ENOMEM;
11688
11689         tx_data = skb_put(skb, tx_len);
11690         memcpy(tx_data, tp->dev->dev_addr, 6);
11691         memset(tx_data + 6, 0x0, 8);
11692
11693         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11694
11695         if (tso_loopback) {
11696                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11697
11698                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11699                               TG3_TSO_TCP_OPT_LEN;
11700
11701                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11702                        sizeof(tg3_tso_header));
11703                 mss = TG3_TSO_MSS;
11704
11705                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11706                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11707
11708                 /* Set the total length field in the IP header */
11709                 iph->tot_len = htons((u16)(mss + hdr_len));
11710
11711                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11712                               TXD_FLAG_CPU_POST_DMA);
11713
11714                 if (tg3_flag(tp, HW_TSO_1) ||
11715                     tg3_flag(tp, HW_TSO_2) ||
11716                     tg3_flag(tp, HW_TSO_3)) {
11717                         struct tcphdr *th;
11718                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11719                         th = (struct tcphdr *)&tx_data[val];
11720                         th->check = 0;
11721                 } else
11722                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
11723
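                /* Each hardware TSO generation wants the header length
                 * encoded differently: HW_TSO_3 scatters it across the
                 * mss field and base_flags, HW_TSO_2 packs it into the
                 * upper mss bits, and older parts encode only the TCP
                 * option length.
                 */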
11724                 if (tg3_flag(tp, HW_TSO_3)) {
11725                         mss |= (hdr_len & 0xc) << 12;
11726                         if (hdr_len & 0x10)
11727                                 base_flags |= 0x00000010;
11728                         base_flags |= (hdr_len & 0x3e0) << 5;
11729                 } else if (tg3_flag(tp, HW_TSO_2))
11730                         mss |= hdr_len << 9;
11731                 else if (tg3_flag(tp, HW_TSO_1) ||
11732                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11733                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11734                 } else {
11735                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11736                 }
11737
11738                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11739         } else {
11740                 num_pkts = 1;
11741                 data_off = ETH_HLEN;
11742         }
11743
11744         for (i = data_off; i < tx_len; i++)
11745                 tx_data[i] = (u8) (i & 0xff);
11746
11747         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11748         if (pci_dma_mapping_error(tp->pdev, map)) {
11749                 dev_kfree_skb(skb);
11750                 return -EIO;
11751         }
11752
11753         val = tnapi->tx_prod;
11754         tnapi->tx_buffers[val].skb = skb;
11755         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
11756
11757         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11758                rnapi->coal_now);
11759
11760         udelay(10);
11761
11762         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11763
11764         budget = tg3_tx_avail(tnapi);
11765         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
11766                             base_flags | TXD_FLAG_END, mss, 0)) {
11767                 tnapi->tx_buffers[val].skb = NULL;
11768                 dev_kfree_skb(skb);
11769                 return -EIO;
11770         }
11771
11772         tnapi->tx_prod++;
11773
11774         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11775         tr32_mailbox(tnapi->prodmbox);
11776
11777         udelay(10);
11778
11779         /* Poll for up to 350 usec (35 x 10 usec) to allow enough time on some 10/100 Mbps devices.  */
11780         for (i = 0; i < 35; i++) {
11781                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11782                        coal_now);
11783
11784                 udelay(10);
11785
11786                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11787                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11788                 if ((tx_idx == tnapi->tx_prod) &&
11789                     (rx_idx == (rx_start_idx + num_pkts)))
11790                         break;
11791         }
11792
11793         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
11794         dev_kfree_skb(skb);
11795
11796         if (tx_idx != tnapi->tx_prod)
11797                 goto out;
11798
11799         if (rx_idx != rx_start_idx + num_pkts)
11800                 goto out;
11801
11802         val = data_off;
11803         while (rx_idx != rx_start_idx) {
11804                 desc = &rnapi->rx_rcb[rx_start_idx++];
11805                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11806                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11807
11808                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11809                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11810                         goto out;
11811
11812                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11813                          - ETH_FCS_LEN;
11814
11815                 if (!tso_loopback) {
11816                         if (rx_len != tx_len)
11817                                 goto out;
11818
11819                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11820                                 if (opaque_key != RXD_OPAQUE_RING_STD)
11821                                         goto out;
11822                         } else {
11823                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11824                                         goto out;
11825                         }
11826                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11827                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11828                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
11829                         goto out;
11830                 }
11831
11832                 if (opaque_key == RXD_OPAQUE_RING_STD) {
11833                         rx_data = tpr->rx_std_buffers[desc_idx].data;
11834                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11835                                              mapping);
11836                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11837                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
11838                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11839                                              mapping);
11840                 } else
11841                         goto out;
11842
11843                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11844                                             PCI_DMA_FROMDEVICE);
11845
11846                 rx_data += TG3_RX_OFFSET(tp);
11847                 for (i = data_off; i < rx_len; i++, val++) {
11848                         if (*(rx_data + i) != (u8) (val & 0xff))
11849                                 goto out;
11850                 }
11851         }
11852
11853         err = 0;
11854
11855         /* tg3_free_rings will unmap and free the rx_data */
11856 out:
11857         return err;
11858 }
11859
11860 #define TG3_STD_LOOPBACK_FAILED         1
11861 #define TG3_JMB_LOOPBACK_FAILED         2
11862 #define TG3_TSO_LOOPBACK_FAILED         4
11863 #define TG3_LOOPBACK_FAILED \
11864         (TG3_STD_LOOPBACK_FAILED | \
11865          TG3_JMB_LOOPBACK_FAILED | \
11866          TG3_TSO_LOOPBACK_FAILED)
11867
11868 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
11869 {
11870         int err = -EIO;
11871         u32 eee_cap;
11872
11873         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11874         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11875
11876         if (!netif_running(tp->dev)) {
11877                 data[0] = TG3_LOOPBACK_FAILED;
11878                 data[1] = TG3_LOOPBACK_FAILED;
11879                 if (do_extlpbk)
11880                         data[2] = TG3_LOOPBACK_FAILED;
11881                 goto done;
11882         }
11883
11884         err = tg3_reset_hw(tp, 1);
11885         if (err) {
11886                 data[0] = TG3_LOOPBACK_FAILED;
11887                 data[1] = TG3_LOOPBACK_FAILED;
11888                 if (do_extlpbk)
11889                         data[2] = TG3_LOOPBACK_FAILED;
11890                 goto done;
11891         }
11892
11893         if (tg3_flag(tp, ENABLE_RSS)) {
11894                 int i;
11895
11896                 /* Reroute all rx packets to the 1st queue */
11897                 for (i = MAC_RSS_INDIR_TBL_0;
11898                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11899                         tw32(i, 0x0);
11900         }
11901
11902         /* HW erratum: MAC loopback fails in some cases on the 5780.
11903          * Normal traffic and PHY loopback are not affected by this
11904          * erratum.  Also, the MAC loopback test is deprecated for
11905          * all newer ASIC revisions.
11906          */
11907         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
11908             !tg3_flag(tp, CPMU_PRESENT)) {
11909                 tg3_mac_loopback(tp, true);
11910
11911                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11912                         data[0] |= TG3_STD_LOOPBACK_FAILED;
11913
11914                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11915                     tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11916                         data[0] |= TG3_JMB_LOOPBACK_FAILED;
11917
11918                 tg3_mac_loopback(tp, false);
11919         }
11920
11921         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11922             !tg3_flag(tp, USE_PHYLIB)) {
11923                 int i;
11924
11925                 tg3_phy_lpbk_set(tp, 0, false);
11926
11927                 /* Wait for link */
11928                 for (i = 0; i < 100; i++) {
11929                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11930                                 break;
11931                         mdelay(1);
11932                 }
11933
11934                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11935                         data[1] |= TG3_STD_LOOPBACK_FAILED;
11936                 if (tg3_flag(tp, TSO_CAPABLE) &&
11937                     tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11938                         data[1] |= TG3_TSO_LOOPBACK_FAILED;
11939                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11940                     tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11941                         data[1] |= TG3_JMB_LOOPBACK_FAILED;
11942
11943                 if (do_extlpbk) {
11944                         tg3_phy_lpbk_set(tp, 0, true);
11945
11946                         /* All link indications report up, but the hardware
11947                          * isn't really ready for about 20 msec.  Double it
11948                          * to be sure.
11949                          */
11950                         mdelay(40);
11951
11952                         if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11953                                 data[2] |= TG3_STD_LOOPBACK_FAILED;
11954                         if (tg3_flag(tp, TSO_CAPABLE) &&
11955                             tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11956                                 data[2] |= TG3_TSO_LOOPBACK_FAILED;
11957                         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11958                             tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11959                                 data[2] |= TG3_JMB_LOOPBACK_FAILED;
11960                 }
11961
11962                 /* Re-enable gphy autopowerdown. */
11963                 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11964                         tg3_phy_toggle_apd(tp, true);
11965         }
11966
11967         err = (data[0] | data[1] | data[2]) ? -EIO : 0;
11968
11969 done:
11970         tp->phy_flags |= eee_cap;
11971
11972         return err;
11973 }
11974
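/* Result layout used by tg3_self_test() below: data[0] NVRAM,
 * data[1] link, data[2] registers, data[3] memory, data[4]-data[6]
 * MAC/PHY/external loopback (see tg3_test_loopback), data[7]
 * interrupt.  A nonzero slot means that test failed.
 *
 * Illustrative userspace sketch (not part of the driver; error
 * handling trimmed, and the test count of 8 is assumed rather than
 * queried via ETHTOOL_GSSET_INFO) showing how these results come
 * back through the standard ETHTOOL_TEST ioctl:
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *	struct ethtool_test *t = calloc(1, sizeof(*t) +
 *					8 * sizeof(__u64));
 *	struct ifreq ifr = { 0 };
 *
 *	t->cmd = ETHTOOL_TEST;
 *	t->flags = ETH_TEST_FL_OFFLINE;
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (char *)t;
 *	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
 *		for (int i = 0; i < 8; i++)
 *			printf("test %d: %llu\n", i,
 *			       (unsigned long long)t->data[i]);
 */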
11975 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11976                           u64 *data)
11977 {
11978         struct tg3 *tp = netdev_priv(dev);
11979         bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
11980
11981         if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
11982             tg3_power_up(tp)) {
11983                 etest->flags |= ETH_TEST_FL_FAILED;
11984                 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
11985                 return;
11986         }
11987
11988         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11989
11990         if (tg3_test_nvram(tp) != 0) {
11991                 etest->flags |= ETH_TEST_FL_FAILED;
11992                 data[0] = 1;
11993         }
11994         if (!doextlpbk && tg3_test_link(tp)) {
11995                 etest->flags |= ETH_TEST_FL_FAILED;
11996                 data[1] = 1;
11997         }
11998         if (etest->flags & ETH_TEST_FL_OFFLINE) {
11999                 int err, err2 = 0, irq_sync = 0;
12000
12001                 if (netif_running(dev)) {
12002                         tg3_phy_stop(tp);
12003                         tg3_netif_stop(tp);
12004                         irq_sync = 1;
12005                 }
12006
12007                 tg3_full_lock(tp, irq_sync);
12008
12009                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
12010                 err = tg3_nvram_lock(tp);
12011                 tg3_halt_cpu(tp, RX_CPU_BASE);
12012                 if (!tg3_flag(tp, 5705_PLUS))
12013                         tg3_halt_cpu(tp, TX_CPU_BASE);
12014                 if (!err)
12015                         tg3_nvram_unlock(tp);
12016
12017                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
12018                         tg3_phy_reset(tp);
12019
12020                 if (tg3_test_registers(tp) != 0) {
12021                         etest->flags |= ETH_TEST_FL_FAILED;
12022                         data[2] = 1;
12023                 }
12024
12025                 if (tg3_test_memory(tp) != 0) {
12026                         etest->flags |= ETH_TEST_FL_FAILED;
12027                         data[3] = 1;
12028                 }
12029
12030                 if (doextlpbk)
12031                         etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
12032
12033                 if (tg3_test_loopback(tp, &data[4], doextlpbk))
12034                         etest->flags |= ETH_TEST_FL_FAILED;
12035
12036                 tg3_full_unlock(tp);
12037
12038                 if (tg3_test_interrupt(tp) != 0) {
12039                         etest->flags |= ETH_TEST_FL_FAILED;
12040                         data[7] = 1;
12041                 }
12042
12043                 tg3_full_lock(tp, 0);
12044
12045                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12046                 if (netif_running(dev)) {
12047                         tg3_flag_set(tp, INIT_COMPLETE);
12048                         err2 = tg3_restart_hw(tp, 1);
12049                         if (!err2)
12050                                 tg3_netif_start(tp);
12051                 }
12052
12053                 tg3_full_unlock(tp);
12054
12055                 if (irq_sync && !err2)
12056                         tg3_phy_start(tp);
12057         }
12058         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
12059                 tg3_power_down(tp);
12060
12061 }
12062
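/* MII register access for userspace tools; e.g. `mii-tool -v ethX`
 * reaches the PHY through the SIOCGMIIPHY/SIOCGMIIREG ioctls
 * handled below.
 */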
12063 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12064 {
12065         struct mii_ioctl_data *data = if_mii(ifr);
12066         struct tg3 *tp = netdev_priv(dev);
12067         int err;
12068
12069         if (tg3_flag(tp, USE_PHYLIB)) {
12070                 struct phy_device *phydev;
12071                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12072                         return -EAGAIN;
12073                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12074                 return phy_mii_ioctl(phydev, ifr, cmd);
12075         }
12076
12077         switch (cmd) {
12078         case SIOCGMIIPHY:
12079                 data->phy_id = tp->phy_addr;
12080
12081                 /* fallthru */
12082         case SIOCGMIIREG: {
12083                 u32 mii_regval;
12084
12085                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12086                         break;                  /* We have no PHY */
12087
12088                 if (!netif_running(dev))
12089                         return -EAGAIN;
12090
12091                 spin_lock_bh(&tp->lock);
12092                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
12093                 spin_unlock_bh(&tp->lock);
12094
12095                 data->val_out = mii_regval;
12096
12097                 return err;
12098         }
12099
12100         case SIOCSMIIREG:
12101                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12102                         break;                  /* We have no PHY */
12103
12104                 if (!netif_running(dev))
12105                         return -EAGAIN;
12106
12107                 spin_lock_bh(&tp->lock);
12108                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
12109                 spin_unlock_bh(&tp->lock);
12110
12111                 return err;
12112
12113         default:
12114                 /* do nothing */
12115                 break;
12116         }
12117         return -EOPNOTSUPP;
12118 }
12119
12120 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12121 {
12122         struct tg3 *tp = netdev_priv(dev);
12123
12124         memcpy(ec, &tp->coal, sizeof(*ec));
12125         return 0;
12126 }
12127
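/* Interrupt coalescing configuration, reached through the standard
 * ethtool interface; e.g. `ethtool -C ethX rx-usecs 20 rx-frames 5`
 * arrives here as ec->rx_coalesce_usecs = 20 and
 * ec->rx_max_coalesced_frames = 5.
 */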
12128 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12129 {
12130         struct tg3 *tp = netdev_priv(dev);
12131         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
12132         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
12133
12134         if (!tg3_flag(tp, 5705_PLUS)) {
12135                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
12136                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
12137                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
12138                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
12139         }
12140
12141         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
12142             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
12143             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
12144             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
12145             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
12146             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
12147             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
12148             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
12149             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
12150             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
12151                 return -EINVAL;
12152
12153         /* No rx interrupts will be generated if both are zero */
12154         if ((ec->rx_coalesce_usecs == 0) &&
12155             (ec->rx_max_coalesced_frames == 0))
12156                 return -EINVAL;
12157
12158         /* No tx interrupts will be generated if both are zero */
12159         if ((ec->tx_coalesce_usecs == 0) &&
12160             (ec->tx_max_coalesced_frames == 0))
12161                 return -EINVAL;
12162
12163         /* Only copy relevant parameters, ignore all others. */
12164         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
12165         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
12166         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
12167         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
12168         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
12169         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
12170         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
12171         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
12172         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
12173
12174         if (netif_running(dev)) {
12175                 tg3_full_lock(tp, 0);
12176                 __tg3_set_coalesce(tp, &tp->coal);
12177                 tg3_full_unlock(tp);
12178         }
12179         return 0;
12180 }
12181
12182 static const struct ethtool_ops tg3_ethtool_ops = {
12183         .get_settings           = tg3_get_settings,
12184         .set_settings           = tg3_set_settings,
12185         .get_drvinfo            = tg3_get_drvinfo,
12186         .get_regs_len           = tg3_get_regs_len,
12187         .get_regs               = tg3_get_regs,
12188         .get_wol                = tg3_get_wol,
12189         .set_wol                = tg3_set_wol,
12190         .get_msglevel           = tg3_get_msglevel,
12191         .set_msglevel           = tg3_set_msglevel,
12192         .nway_reset             = tg3_nway_reset,
12193         .get_link               = ethtool_op_get_link,
12194         .get_eeprom_len         = tg3_get_eeprom_len,
12195         .get_eeprom             = tg3_get_eeprom,
12196         .set_eeprom             = tg3_set_eeprom,
12197         .get_ringparam          = tg3_get_ringparam,
12198         .set_ringparam          = tg3_set_ringparam,
12199         .get_pauseparam         = tg3_get_pauseparam,
12200         .set_pauseparam         = tg3_set_pauseparam,
12201         .self_test              = tg3_self_test,
12202         .get_strings            = tg3_get_strings,
12203         .set_phys_id            = tg3_set_phys_id,
12204         .get_ethtool_stats      = tg3_get_ethtool_stats,
12205         .get_coalesce           = tg3_get_coalesce,
12206         .set_coalesce           = tg3_set_coalesce,
12207         .get_sset_count         = tg3_get_sset_count,
12208         .get_rxnfc              = tg3_get_rxnfc,
12209         .get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
12210         .get_rxfh_indir         = tg3_get_rxfh_indir,
12211         .set_rxfh_indir         = tg3_set_rxfh_indir,
12212 };
12213
12214 static void tg3_set_rx_mode(struct net_device *dev)
12215 {
12216         struct tg3 *tp = netdev_priv(dev);
12217
12218         if (!netif_running(dev))
12219                 return;
12220
12221         tg3_full_lock(tp, 0);
12222         __tg3_set_rx_mode(dev);
12223         tg3_full_unlock(tp);
12224 }
12225
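/* On 5780-class devices a jumbo MTU is handled without the jumbo
 * ring and TSO is disabled while it is in use; all other chips
 * simply toggle JUMBO_RING_ENABLE.
 */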
12226 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
12227                                int new_mtu)
12228 {
12229         dev->mtu = new_mtu;
12230
12231         if (new_mtu > ETH_DATA_LEN) {
12232                 if (tg3_flag(tp, 5780_CLASS)) {
12233                         netdev_update_features(dev);
12234                         tg3_flag_clear(tp, TSO_CAPABLE);
12235                 } else {
12236                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
12237                 }
12238         } else {
12239                 if (tg3_flag(tp, 5780_CLASS)) {
12240                         tg3_flag_set(tp, TSO_CAPABLE);
12241                         netdev_update_features(dev);
12242                 }
12243                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
12244         }
12245 }
12246
12247 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
12248 {
12249         struct tg3 *tp = netdev_priv(dev);
12250         int err;
12251
12252         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
12253                 return -EINVAL;
12254
12255         if (!netif_running(dev)) {
12256                 /* We'll just catch it later when the
12257                  * device is brought up.
12258                  */
12259                 tg3_set_mtu(dev, tp, new_mtu);
12260                 return 0;
12261         }
12262
12263         tg3_phy_stop(tp);
12264
12265         tg3_netif_stop(tp);
12266
12267         tg3_full_lock(tp, 1);
12268
12269         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12270
12271         tg3_set_mtu(dev, tp, new_mtu);
12272
12273         err = tg3_restart_hw(tp, 0);
12274
12275         if (!err)
12276                 tg3_netif_start(tp);
12277
12278         tg3_full_unlock(tp);
12279
12280         if (!err)
12281                 tg3_phy_start(tp);
12282
12283         return err;
12284 }
12285
12286 static const struct net_device_ops tg3_netdev_ops = {
12287         .ndo_open               = tg3_open,
12288         .ndo_stop               = tg3_close,
12289         .ndo_start_xmit         = tg3_start_xmit,
12290         .ndo_get_stats64        = tg3_get_stats64,
12291         .ndo_validate_addr      = eth_validate_addr,
12292         .ndo_set_rx_mode        = tg3_set_rx_mode,
12293         .ndo_set_mac_address    = tg3_set_mac_addr,
12294         .ndo_do_ioctl           = tg3_ioctl,
12295         .ndo_tx_timeout         = tg3_tx_timeout,
12296         .ndo_change_mtu         = tg3_change_mtu,
12297         .ndo_fix_features       = tg3_fix_features,
12298         .ndo_set_features       = tg3_set_features,
12299 #ifdef CONFIG_NET_POLL_CONTROLLER
12300         .ndo_poll_controller    = tg3_poll_controller,
12301 #endif
12302 };
12303
12304 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
12305 {
12306         u32 cursize, val, magic;
12307
12308         tp->nvram_size = EEPROM_CHIP_SIZE;
12309
12310         if (tg3_nvram_read(tp, 0, &magic) != 0)
12311                 return;
12312
12313         if ((magic != TG3_EEPROM_MAGIC) &&
12314             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
12315             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
12316                 return;
12317
12318         /*
12319          * Size the chip by reading offsets at increasing powers of two.
12320          * When we encounter our validation signature, we know the addressing
12321          * has wrapped around, and thus have our chip size.
12322          */
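        /* For example, on a hypothetical 8 KiB (0x2000-byte) part that
         * mirrors addresses beyond its size, the reads at 0x10 through
         * 0x1000 return ordinary data (assuming none happens to equal
         * the magic), while the read at 0x2000 aliases offset 0 and
         * returns the magic, leaving nvram_size at 0x2000.
         */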
12323         cursize = 0x10;
12324
12325         while (cursize < tp->nvram_size) {
12326                 if (tg3_nvram_read(tp, cursize, &val) != 0)
12327                         return;
12328
12329                 if (val == magic)
12330                         break;
12331
12332                 cursize <<= 1;
12333         }
12334
12335         tp->nvram_size = cursize;
12336 }
12337
12338 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
12339 {
12340         u32 val;
12341
12342         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
12343                 return;
12344
12345         /* Selfboot format */
12346         if (val != TG3_EEPROM_MAGIC) {
12347                 tg3_get_eeprom_size(tp);
12348                 return;
12349         }
12350
12351         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
12352                 if (val != 0) {
12353                         /* This is confusing.  We want to operate on the
12354                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
12355                          * call will read from NVRAM and byteswap the data
12356                          * according to the byteswapping settings for all
12357                          * other register accesses.  This ensures the data we
12358                          * want will always reside in the lower 16-bits.
12359                          * However, the data in NVRAM is in LE format, which
12360                          * means the data from the NVRAM read will always be
12361                          * opposite the endianness of the CPU.  The 16-bit
12362                          * byteswap then brings the data to CPU endianness.
12363                          */
12364                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
12365                         return;
12366                 }
12367         }
12368         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12369 }
12370
12371 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
12372 {
12373         u32 nvcfg1;
12374
12375         nvcfg1 = tr32(NVRAM_CFG1);
12376         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
12377                 tg3_flag_set(tp, FLASH);
12378         } else {
12379                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12380                 tw32(NVRAM_CFG1, nvcfg1);
12381         }
12382
12383         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12384             tg3_flag(tp, 5780_CLASS)) {
12385                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
12386                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
12387                         tp->nvram_jedecnum = JEDEC_ATMEL;
12388                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12389                         tg3_flag_set(tp, NVRAM_BUFFERED);
12390                         break;
12391                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
12392                         tp->nvram_jedecnum = JEDEC_ATMEL;
12393                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
12394                         break;
12395                 case FLASH_VENDOR_ATMEL_EEPROM:
12396                         tp->nvram_jedecnum = JEDEC_ATMEL;
12397                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12398                         tg3_flag_set(tp, NVRAM_BUFFERED);
12399                         break;
12400                 case FLASH_VENDOR_ST:
12401                         tp->nvram_jedecnum = JEDEC_ST;
12402                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
12403                         tg3_flag_set(tp, NVRAM_BUFFERED);
12404                         break;
12405                 case FLASH_VENDOR_SAIFUN:
12406                         tp->nvram_jedecnum = JEDEC_SAIFUN;
12407                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
12408                         break;
12409                 case FLASH_VENDOR_SST_SMALL:
12410                 case FLASH_VENDOR_SST_LARGE:
12411                         tp->nvram_jedecnum = JEDEC_SST;
12412                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
12413                         break;
12414                 }
12415         } else {
12416                 tp->nvram_jedecnum = JEDEC_ATMEL;
12417                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12418                 tg3_flag_set(tp, NVRAM_BUFFERED);
12419         }
12420 }
12421
12422 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12423 {
12424         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12425         case FLASH_5752PAGE_SIZE_256:
12426                 tp->nvram_pagesize = 256;
12427                 break;
12428         case FLASH_5752PAGE_SIZE_512:
12429                 tp->nvram_pagesize = 512;
12430                 break;
12431         case FLASH_5752PAGE_SIZE_1K:
12432                 tp->nvram_pagesize = 1024;
12433                 break;
12434         case FLASH_5752PAGE_SIZE_2K:
12435                 tp->nvram_pagesize = 2048;
12436                 break;
12437         case FLASH_5752PAGE_SIZE_4K:
12438                 tp->nvram_pagesize = 4096;
12439                 break;
12440         case FLASH_5752PAGE_SIZE_264:
12441                 tp->nvram_pagesize = 264;
12442                 break;
12443         case FLASH_5752PAGE_SIZE_528:
12444                 tp->nvram_pagesize = 528;
12445                 break;
12446         }
12447 }
12448
12449 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
12450 {
12451         u32 nvcfg1;
12452
12453         nvcfg1 = tr32(NVRAM_CFG1);
12454
12455         /* NVRAM protection for TPM */
12456         if (nvcfg1 & (1 << 27))
12457                 tg3_flag_set(tp, PROTECTED_NVRAM);
12458
12459         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12460         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
12461         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
12462                 tp->nvram_jedecnum = JEDEC_ATMEL;
12463                 tg3_flag_set(tp, NVRAM_BUFFERED);
12464                 break;
12465         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12466                 tp->nvram_jedecnum = JEDEC_ATMEL;
12467                 tg3_flag_set(tp, NVRAM_BUFFERED);
12468                 tg3_flag_set(tp, FLASH);
12469                 break;
12470         case FLASH_5752VENDOR_ST_M45PE10:
12471         case FLASH_5752VENDOR_ST_M45PE20:
12472         case FLASH_5752VENDOR_ST_M45PE40:
12473                 tp->nvram_jedecnum = JEDEC_ST;
12474                 tg3_flag_set(tp, NVRAM_BUFFERED);
12475                 tg3_flag_set(tp, FLASH);
12476                 break;
12477         }
12478
12479         if (tg3_flag(tp, FLASH)) {
12480                 tg3_nvram_get_pagesize(tp, nvcfg1);
12481         } else {
12482                 /* For EEPROMs, set the page size to the maximum EEPROM size */
12483                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12484
12485                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12486                 tw32(NVRAM_CFG1, nvcfg1);
12487         }
12488 }
12489
12490 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
12491 {
12492         u32 nvcfg1, protect = 0;
12493
12494         nvcfg1 = tr32(NVRAM_CFG1);
12495
12496         /* NVRAM protection for TPM */
12497         if (nvcfg1 & (1 << 27)) {
12498                 tg3_flag_set(tp, PROTECTED_NVRAM);
12499                 protect = 1;
12500         }
12501
12502         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12503         switch (nvcfg1) {
12504         case FLASH_5755VENDOR_ATMEL_FLASH_1:
12505         case FLASH_5755VENDOR_ATMEL_FLASH_2:
12506         case FLASH_5755VENDOR_ATMEL_FLASH_3:
12507         case FLASH_5755VENDOR_ATMEL_FLASH_5:
12508                 tp->nvram_jedecnum = JEDEC_ATMEL;
12509                 tg3_flag_set(tp, NVRAM_BUFFERED);
12510                 tg3_flag_set(tp, FLASH);
12511                 tp->nvram_pagesize = 264;
12512                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
12513                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
12514                         tp->nvram_size = (protect ? 0x3e200 :
12515                                           TG3_NVRAM_SIZE_512KB);
12516                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
12517                         tp->nvram_size = (protect ? 0x1f200 :
12518                                           TG3_NVRAM_SIZE_256KB);
12519                 else
12520                         tp->nvram_size = (protect ? 0x1f200 :
12521                                           TG3_NVRAM_SIZE_128KB);
12522                 break;
12523         case FLASH_5752VENDOR_ST_M45PE10:
12524         case FLASH_5752VENDOR_ST_M45PE20:
12525         case FLASH_5752VENDOR_ST_M45PE40:
12526                 tp->nvram_jedecnum = JEDEC_ST;
12527                 tg3_flag_set(tp, NVRAM_BUFFERED);
12528                 tg3_flag_set(tp, FLASH);
12529                 tp->nvram_pagesize = 256;
12530                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
12531                         tp->nvram_size = (protect ?
12532                                           TG3_NVRAM_SIZE_64KB :
12533                                           TG3_NVRAM_SIZE_128KB);
12534                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
12535                         tp->nvram_size = (protect ?
12536                                           TG3_NVRAM_SIZE_64KB :
12537                                           TG3_NVRAM_SIZE_256KB);
12538                 else
12539                         tp->nvram_size = (protect ?
12540                                           TG3_NVRAM_SIZE_128KB :
12541                                           TG3_NVRAM_SIZE_512KB);
12542                 break;
12543         }
12544 }
12545
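/* NVRAM detection for the 5787 (also used for the 5784/5785, see
 * tg3_nvram_init()): serial EEPROMs, buffered Atmel flash with
 * 264-byte pages, or ST flash with 256-byte pages.
 */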
12546 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
12547 {
12548         u32 nvcfg1;
12549
12550         nvcfg1 = tr32(NVRAM_CFG1);
12551
12552         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12553         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
12554         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12555         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
12556         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12557                 tp->nvram_jedecnum = JEDEC_ATMEL;
12558                 tg3_flag_set(tp, NVRAM_BUFFERED);
12559                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12560
12561                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12562                 tw32(NVRAM_CFG1, nvcfg1);
12563                 break;
12564         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12565         case FLASH_5755VENDOR_ATMEL_FLASH_1:
12566         case FLASH_5755VENDOR_ATMEL_FLASH_2:
12567         case FLASH_5755VENDOR_ATMEL_FLASH_3:
12568                 tp->nvram_jedecnum = JEDEC_ATMEL;
12569                 tg3_flag_set(tp, NVRAM_BUFFERED);
12570                 tg3_flag_set(tp, FLASH);
12571                 tp->nvram_pagesize = 264;
12572                 break;
12573         case FLASH_5752VENDOR_ST_M45PE10:
12574         case FLASH_5752VENDOR_ST_M45PE20:
12575         case FLASH_5752VENDOR_ST_M45PE40:
12576                 tp->nvram_jedecnum = JEDEC_ST;
12577                 tg3_flag_set(tp, NVRAM_BUFFERED);
12578                 tg3_flag_set(tp, FLASH);
12579                 tp->nvram_pagesize = 256;
12580                 break;
12581         }
12582 }
12583
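/* 5761 NVRAM detection.  With the TPM protection bit set, the usable
 * size is read back from NVRAM_ADDR_LOCKOUT; otherwise it is inferred
 * from the flash part (256KB up to 2MB).
 */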
12584 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
12585 {
12586         u32 nvcfg1, protect = 0;
12587
12588         nvcfg1 = tr32(NVRAM_CFG1);
12589
12590         /* NVRAM protection for TPM */
12591         if (nvcfg1 & (1 << 27)) {
12592                 tg3_flag_set(tp, PROTECTED_NVRAM);
12593                 protect = 1;
12594         }
12595
12596         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12597         switch (nvcfg1) {
12598         case FLASH_5761VENDOR_ATMEL_ADB021D:
12599         case FLASH_5761VENDOR_ATMEL_ADB041D:
12600         case FLASH_5761VENDOR_ATMEL_ADB081D:
12601         case FLASH_5761VENDOR_ATMEL_ADB161D:
12602         case FLASH_5761VENDOR_ATMEL_MDB021D:
12603         case FLASH_5761VENDOR_ATMEL_MDB041D:
12604         case FLASH_5761VENDOR_ATMEL_MDB081D:
12605         case FLASH_5761VENDOR_ATMEL_MDB161D:
12606                 tp->nvram_jedecnum = JEDEC_ATMEL;
12607                 tg3_flag_set(tp, NVRAM_BUFFERED);
12608                 tg3_flag_set(tp, FLASH);
12609                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12610                 tp->nvram_pagesize = 256;
12611                 break;
12612         case FLASH_5761VENDOR_ST_A_M45PE20:
12613         case FLASH_5761VENDOR_ST_A_M45PE40:
12614         case FLASH_5761VENDOR_ST_A_M45PE80:
12615         case FLASH_5761VENDOR_ST_A_M45PE16:
12616         case FLASH_5761VENDOR_ST_M_M45PE20:
12617         case FLASH_5761VENDOR_ST_M_M45PE40:
12618         case FLASH_5761VENDOR_ST_M_M45PE80:
12619         case FLASH_5761VENDOR_ST_M_M45PE16:
12620                 tp->nvram_jedecnum = JEDEC_ST;
12621                 tg3_flag_set(tp, NVRAM_BUFFERED);
12622                 tg3_flag_set(tp, FLASH);
12623                 tp->nvram_pagesize = 256;
12624                 break;
12625         }
12626
12627         if (protect) {
12628                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
12629         } else {
12630                 switch (nvcfg1) {
12631                 case FLASH_5761VENDOR_ATMEL_ADB161D:
12632                 case FLASH_5761VENDOR_ATMEL_MDB161D:
12633                 case FLASH_5761VENDOR_ST_A_M45PE16:
12634                 case FLASH_5761VENDOR_ST_M_M45PE16:
12635                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
12636                         break;
12637                 case FLASH_5761VENDOR_ATMEL_ADB081D:
12638                 case FLASH_5761VENDOR_ATMEL_MDB081D:
12639                 case FLASH_5761VENDOR_ST_A_M45PE80:
12640                 case FLASH_5761VENDOR_ST_M_M45PE80:
12641                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12642                         break;
12643                 case FLASH_5761VENDOR_ATMEL_ADB041D:
12644                 case FLASH_5761VENDOR_ATMEL_MDB041D:
12645                 case FLASH_5761VENDOR_ST_A_M45PE40:
12646                 case FLASH_5761VENDOR_ST_M_M45PE40:
12647                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12648                         break;
12649                 case FLASH_5761VENDOR_ATMEL_ADB021D:
12650                 case FLASH_5761VENDOR_ATMEL_MDB021D:
12651                 case FLASH_5761VENDOR_ST_A_M45PE20:
12652                 case FLASH_5761VENDOR_ST_M_M45PE20:
12653                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12654                         break;
12655                 }
12656         }
12657 }
12658
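/* The 5906 always has a buffered Atmel EEPROM, so no CFG1 decode is
 * needed here.
 */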
12659 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12660 {
12661         tp->nvram_jedecnum = JEDEC_ATMEL;
12662         tg3_flag_set(tp, NVRAM_BUFFERED);
12663         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12664 }
12665
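/* 57780-class NVRAM detection.  Unrecognized vendor straps mark the
 * device NO_NVRAM; flash parts additionally get NO_NVRAM_ADDR_TRANS
 * unless the page size is 264 or 528 bytes.
 */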
12666 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12667 {
12668         u32 nvcfg1;
12669
12670         nvcfg1 = tr32(NVRAM_CFG1);
12671
12672         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12673         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12674         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12675                 tp->nvram_jedecnum = JEDEC_ATMEL;
12676                 tg3_flag_set(tp, NVRAM_BUFFERED);
12677                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12678
12679                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12680                 tw32(NVRAM_CFG1, nvcfg1);
12681                 return;
12682         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12683         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12684         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12685         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12686         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12687         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12688         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12689                 tp->nvram_jedecnum = JEDEC_ATMEL;
12690                 tg3_flag_set(tp, NVRAM_BUFFERED);
12691                 tg3_flag_set(tp, FLASH);
12692
12693                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12694                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12695                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12696                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12697                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12698                         break;
12699                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12700                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12701                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12702                         break;
12703                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12704                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12705                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12706                         break;
12707                 }
12708                 break;
12709         case FLASH_5752VENDOR_ST_M45PE10:
12710         case FLASH_5752VENDOR_ST_M45PE20:
12711         case FLASH_5752VENDOR_ST_M45PE40:
12712                 tp->nvram_jedecnum = JEDEC_ST;
12713                 tg3_flag_set(tp, NVRAM_BUFFERED);
12714                 tg3_flag_set(tp, FLASH);
12715
12716                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12717                 case FLASH_5752VENDOR_ST_M45PE10:
12718                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12719                         break;
12720                 case FLASH_5752VENDOR_ST_M45PE20:
12721                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12722                         break;
12723                 case FLASH_5752VENDOR_ST_M45PE40:
12724                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12725                         break;
12726                 }
12727                 break;
12728         default:
12729                 tg3_flag_set(tp, NO_NVRAM);
12730                 return;
12731         }
12732
12733         tg3_nvram_get_pagesize(tp, nvcfg1);
12734         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12735                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12736 }
12737
12738
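/* NVRAM detection for the 5717 family (also used for the 5719, see
 * tg3_nvram_init()).  Some parts leave nvram_size at zero so that
 * tg3_nvram_get_size() can probe the size later.
 */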
12739 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12740 {
12741         u32 nvcfg1;
12742
12743         nvcfg1 = tr32(NVRAM_CFG1);
12744
12745         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12746         case FLASH_5717VENDOR_ATMEL_EEPROM:
12747         case FLASH_5717VENDOR_MICRO_EEPROM:
12748                 tp->nvram_jedecnum = JEDEC_ATMEL;
12749                 tg3_flag_set(tp, NVRAM_BUFFERED);
12750                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12751
12752                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12753                 tw32(NVRAM_CFG1, nvcfg1);
12754                 return;
12755         case FLASH_5717VENDOR_ATMEL_MDB011D:
12756         case FLASH_5717VENDOR_ATMEL_ADB011B:
12757         case FLASH_5717VENDOR_ATMEL_ADB011D:
12758         case FLASH_5717VENDOR_ATMEL_MDB021D:
12759         case FLASH_5717VENDOR_ATMEL_ADB021B:
12760         case FLASH_5717VENDOR_ATMEL_ADB021D:
12761         case FLASH_5717VENDOR_ATMEL_45USPT:
12762                 tp->nvram_jedecnum = JEDEC_ATMEL;
12763                 tg3_flag_set(tp, NVRAM_BUFFERED);
12764                 tg3_flag_set(tp, FLASH);
12765
12766                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12767                 case FLASH_5717VENDOR_ATMEL_MDB021D:
12768                         /* Detect size with tg3_nvram_get_size() */
12769                         break;
12770                 case FLASH_5717VENDOR_ATMEL_ADB021B:
12771                 case FLASH_5717VENDOR_ATMEL_ADB021D:
12772                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12773                         break;
12774                 default:
12775                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12776                         break;
12777                 }
12778                 break;
12779         case FLASH_5717VENDOR_ST_M_M25PE10:
12780         case FLASH_5717VENDOR_ST_A_M25PE10:
12781         case FLASH_5717VENDOR_ST_M_M45PE10:
12782         case FLASH_5717VENDOR_ST_A_M45PE10:
12783         case FLASH_5717VENDOR_ST_M_M25PE20:
12784         case FLASH_5717VENDOR_ST_A_M25PE20:
12785         case FLASH_5717VENDOR_ST_M_M45PE20:
12786         case FLASH_5717VENDOR_ST_A_M45PE20:
12787         case FLASH_5717VENDOR_ST_25USPT:
12788         case FLASH_5717VENDOR_ST_45USPT:
12789                 tp->nvram_jedecnum = JEDEC_ST;
12790                 tg3_flag_set(tp, NVRAM_BUFFERED);
12791                 tg3_flag_set(tp, FLASH);
12792
12793                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12794                 case FLASH_5717VENDOR_ST_M_M25PE20:
12795                 case FLASH_5717VENDOR_ST_M_M45PE20:
12796                         /* Detect size with tg3_nvram_get_size() */
12797                         break;
12798                 case FLASH_5717VENDOR_ST_A_M25PE20:
12799                 case FLASH_5717VENDOR_ST_A_M45PE20:
12800                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12801                         break;
12802                 default:
12803                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12804                         break;
12805                 }
12806                 break;
12807         default:
12808                 tg3_flag_set(tp, NO_NVRAM);
12809                 return;
12810         }
12811
12812         tg3_nvram_get_pagesize(tp, nvcfg1);
12813         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12814                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12815 }
12816
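/* 5720 NVRAM pin-strap decode.  The EEPROM straps select AT24C512-
 * or AT24C02-style addressing (HD vs. LD); flash handling follows
 * the 5717 pattern.
 */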
12817 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12818 {
12819         u32 nvcfg1, nvmpinstrp;
12820
12821         nvcfg1 = tr32(NVRAM_CFG1);
12822         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12823
12824         switch (nvmpinstrp) {
12825         case FLASH_5720_EEPROM_HD:
12826         case FLASH_5720_EEPROM_LD:
12827                 tp->nvram_jedecnum = JEDEC_ATMEL;
12828                 tg3_flag_set(tp, NVRAM_BUFFERED);
12829
12830                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12831                 tw32(NVRAM_CFG1, nvcfg1);
12832                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12833                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12834                 else
12835                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12836                 return;
12837         case FLASH_5720VENDOR_M_ATMEL_DB011D:
12838         case FLASH_5720VENDOR_A_ATMEL_DB011B:
12839         case FLASH_5720VENDOR_A_ATMEL_DB011D:
12840         case FLASH_5720VENDOR_M_ATMEL_DB021D:
12841         case FLASH_5720VENDOR_A_ATMEL_DB021B:
12842         case FLASH_5720VENDOR_A_ATMEL_DB021D:
12843         case FLASH_5720VENDOR_M_ATMEL_DB041D:
12844         case FLASH_5720VENDOR_A_ATMEL_DB041B:
12845         case FLASH_5720VENDOR_A_ATMEL_DB041D:
12846         case FLASH_5720VENDOR_M_ATMEL_DB081D:
12847         case FLASH_5720VENDOR_A_ATMEL_DB081D:
12848         case FLASH_5720VENDOR_ATMEL_45USPT:
12849                 tp->nvram_jedecnum = JEDEC_ATMEL;
12850                 tg3_flag_set(tp, NVRAM_BUFFERED);
12851                 tg3_flag_set(tp, FLASH);
12852
12853                 switch (nvmpinstrp) {
12854                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12855                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12856                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12857                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12858                         break;
12859                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12860                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12861                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12862                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12863                         break;
12864                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12865                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12866                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12867                         break;
12868                 default:
12869                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12870                         break;
12871                 }
12872                 break;
12873         case FLASH_5720VENDOR_M_ST_M25PE10:
12874         case FLASH_5720VENDOR_M_ST_M45PE10:
12875         case FLASH_5720VENDOR_A_ST_M25PE10:
12876         case FLASH_5720VENDOR_A_ST_M45PE10:
12877         case FLASH_5720VENDOR_M_ST_M25PE20:
12878         case FLASH_5720VENDOR_M_ST_M45PE20:
12879         case FLASH_5720VENDOR_A_ST_M25PE20:
12880         case FLASH_5720VENDOR_A_ST_M45PE20:
12881         case FLASH_5720VENDOR_M_ST_M25PE40:
12882         case FLASH_5720VENDOR_M_ST_M45PE40:
12883         case FLASH_5720VENDOR_A_ST_M25PE40:
12884         case FLASH_5720VENDOR_A_ST_M45PE40:
12885         case FLASH_5720VENDOR_M_ST_M25PE80:
12886         case FLASH_5720VENDOR_M_ST_M45PE80:
12887         case FLASH_5720VENDOR_A_ST_M25PE80:
12888         case FLASH_5720VENDOR_A_ST_M45PE80:
12889         case FLASH_5720VENDOR_ST_25USPT:
12890         case FLASH_5720VENDOR_ST_45USPT:
12891                 tp->nvram_jedecnum = JEDEC_ST;
12892                 tg3_flag_set(tp, NVRAM_BUFFERED);
12893                 tg3_flag_set(tp, FLASH);
12894
12895                 switch (nvmpinstrp) {
12896                 case FLASH_5720VENDOR_M_ST_M25PE20:
12897                 case FLASH_5720VENDOR_M_ST_M45PE20:
12898                 case FLASH_5720VENDOR_A_ST_M25PE20:
12899                 case FLASH_5720VENDOR_A_ST_M45PE20:
12900                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12901                         break;
12902                 case FLASH_5720VENDOR_M_ST_M25PE40:
12903                 case FLASH_5720VENDOR_M_ST_M45PE40:
12904                 case FLASH_5720VENDOR_A_ST_M25PE40:
12905                 case FLASH_5720VENDOR_A_ST_M45PE40:
12906                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12907                         break;
12908                 case FLASH_5720VENDOR_M_ST_M25PE80:
12909                 case FLASH_5720VENDOR_M_ST_M45PE80:
12910                 case FLASH_5720VENDOR_A_ST_M25PE80:
12911                 case FLASH_5720VENDOR_A_ST_M45PE80:
12912                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12913                         break;
12914                 default:
12915                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12916                         break;
12917                 }
12918                 break;
12919         default:
12920                 tg3_flag_set(tp, NO_NVRAM);
12921                 return;
12922         }
12923
12924         tg3_nvram_get_pagesize(tp, nvcfg1);
12925         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12926                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12927 }
12928
12929 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12930 static void __devinit tg3_nvram_init(struct tg3 *tp)
12931 {
12932         tw32_f(GRC_EEPROM_ADDR,
12933              (EEPROM_ADDR_FSM_RESET |
12934               (EEPROM_DEFAULT_CLOCK_PERIOD <<
12935                EEPROM_ADDR_CLKPERD_SHIFT)));
12936
12937         msleep(1);
12938
12939         /* Enable serial EEPROM accesses. */
12940         tw32_f(GRC_LOCAL_CTRL,
12941              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12942         udelay(100);
12943
12944         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12945             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12946                 tg3_flag_set(tp, NVRAM);
12947
12948                 if (tg3_nvram_lock(tp)) {
12949                         netdev_warn(tp->dev,
12950                                     "Cannot get nvram lock, %s failed\n",
12951                                     __func__);
12952                         return;
12953                 }
12954                 tg3_enable_nvram_access(tp);
12955
12956                 tp->nvram_size = 0;
12957
12958                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12959                         tg3_get_5752_nvram_info(tp);
12960                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12961                         tg3_get_5755_nvram_info(tp);
12962                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12963                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12964                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12965                         tg3_get_5787_nvram_info(tp);
12966                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12967                         tg3_get_5761_nvram_info(tp);
12968                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12969                         tg3_get_5906_nvram_info(tp);
12970                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12971                          tg3_flag(tp, 57765_CLASS))
12972                         tg3_get_57780_nvram_info(tp);
12973                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12974                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12975                         tg3_get_5717_nvram_info(tp);
12976                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12977                         tg3_get_5720_nvram_info(tp);
12978                 else
12979                         tg3_get_nvram_info(tp);
12980
12981                 if (tp->nvram_size == 0)
12982                         tg3_get_nvram_size(tp);
12983
12984                 tg3_disable_nvram_access(tp);
12985                 tg3_nvram_unlock(tp);
12986
12987         } else {
12988                 tg3_flag_clear(tp, NVRAM);
12989                 tg3_flag_clear(tp, NVRAM_BUFFERED);
12990
12991                 tg3_get_eeprom_size(tp);
12992         }
12993 }
12994
12995 struct subsys_tbl_ent {
12996         u16 subsys_vendor, subsys_devid;
12997         u32 phy_id;
12998 };
12999
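/* PHY IDs of known boards, keyed by PCI subsystem vendor/device.  A
 * phy_id of 0 marks a SerDes board; tg3_phy_probe() maps 0 and
 * TG3_PHY_ID_BCM8002 to TG3_PHYFLG_PHY_SERDES.
 */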
13000 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
13001         /* Broadcom boards. */
13002         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13003           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
13004         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13005           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
13006         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13007           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
13008         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13009           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
13010         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13011           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
13012         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13013           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
13014         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13015           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
13016         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13017           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
13018         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13019           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
13020         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13021           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
13022         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13023           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
13024
13025         /* 3com boards. */
13026         { TG3PCI_SUBVENDOR_ID_3COM,
13027           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
13028         { TG3PCI_SUBVENDOR_ID_3COM,
13029           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
13030         { TG3PCI_SUBVENDOR_ID_3COM,
13031           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
13032         { TG3PCI_SUBVENDOR_ID_3COM,
13033           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
13034         { TG3PCI_SUBVENDOR_ID_3COM,
13035           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
13036
13037         /* DELL boards. */
13038         { TG3PCI_SUBVENDOR_ID_DELL,
13039           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
13040         { TG3PCI_SUBVENDOR_ID_DELL,
13041           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
13042         { TG3PCI_SUBVENDOR_ID_DELL,
13043           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
13044         { TG3PCI_SUBVENDOR_ID_DELL,
13045           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
13046
13047         /* Compaq boards. */
13048         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13049           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
13050         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13051           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
13052         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13053           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
13054         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13055           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
13056         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13057           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
13058
13059         /* IBM boards. */
13060         { TG3PCI_SUBVENDOR_ID_IBM,
13061           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
13062 };
13063
13064 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
13065 {
13066         int i;
13067
13068         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
13069                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
13070                      tp->pdev->subsystem_vendor) &&
13071                     (subsys_id_to_phy_id[i].subsys_devid ==
13072                      tp->pdev->subsystem_device))
13073                         return &subsys_id_to_phy_id[i];
13074         }
13075         return NULL;
13076 }
13077
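/* Pull the configuration the bootcode left in NIC SRAM: PHY ID, LED
 * mode, ASF/APE enables, and WoL capability.  If the SRAM signature
 * is absent, the defaults set at the top of the function are kept.
 */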
13078 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
13079 {
13080         u32 val;
13081
13082         tp->phy_id = TG3_PHY_ID_INVALID;
13083         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13084
13085         /* Assume an onboard, WOL-capable device by default.  */
13086         tg3_flag_set(tp, EEPROM_WRITE_PROT);
13087         tg3_flag_set(tp, WOL_CAP);
13088
13089         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13090                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
13091                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13092                         tg3_flag_set(tp, IS_NIC);
13093                 }
13094                 val = tr32(VCPU_CFGSHDW);
13095                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
13096                         tg3_flag_set(tp, ASPM_WORKAROUND);
13097                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
13098                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
13099                         tg3_flag_set(tp, WOL_ENABLE);
13100                         device_set_wakeup_enable(&tp->pdev->dev, true);
13101                 }
13102                 goto done;
13103         }
13104
13105         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
13106         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
13107                 u32 nic_cfg, led_cfg;
13108                 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
13109                 int eeprom_phy_serdes = 0;
13110
13111                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
13112                 tp->nic_sram_data_cfg = nic_cfg;
13113
13114                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
13115                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
13116                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13117                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13118                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
13119                     (ver > 0) && (ver < 0x100))
13120                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13121
13122                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13123                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
13124
13125                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
13126                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
13127                         eeprom_phy_serdes = 1;
13128
13129                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
13130                 if (nic_phy_id != 0) {
13131                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
13132                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
13133
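                        /* Fold the two SRAM words into tg3's internal
                         * PHY ID layout: id1's payload lands in bits
                         * [25:10], id2's 0xfc00 bits in [31:26], and
                         * id2's low ten bits (model/revision) stay in
                         * [9:0].  tg3_phy_probe() packs the MII ID
                         * registers the same way.
                         */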
13134                         eeprom_phy_id  = (id1 >> 16) << 10;
13135                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
13136                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
13137                 } else
13138                         eeprom_phy_id = 0;
13139
13140                 tp->phy_id = eeprom_phy_id;
13141                 if (eeprom_phy_serdes) {
13142                         if (!tg3_flag(tp, 5705_PLUS))
13143                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13144                         else
13145                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
13146                 }
13147
13148                 if (tg3_flag(tp, 5750_PLUS))
13149                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
13150                                     SHASTA_EXT_LED_MODE_MASK);
13151                 else
13152                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
13153
13154                 switch (led_cfg) {
13155                 default:
13156                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
13157                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13158                         break;
13159
13160                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
13161                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13162                         break;
13163
13164                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
13165                         tp->led_ctrl = LED_CTRL_MODE_MAC;
13166
13167                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
13168                          * read on some older 5700/5701 bootcode.
13169                          */
13170                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13171                             ASIC_REV_5700 ||
13172                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
13173                             ASIC_REV_5701)
13174                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13175
13176                         break;
13177
13178                 case SHASTA_EXT_LED_SHARED:
13179                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
13180                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
13181                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
13182                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13183                                                  LED_CTRL_MODE_PHY_2);
13184                         break;
13185
13186                 case SHASTA_EXT_LED_MAC:
13187                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
13188                         break;
13189
13190                 case SHASTA_EXT_LED_COMBO:
13191                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
13192                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
13193                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13194                                                  LED_CTRL_MODE_PHY_2);
13195                         break;
13196
13197                 }
13198
13199                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13200                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
13201                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
13202                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13203
13204                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
13205                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13206
13207                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
13208                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
13209                         if ((tp->pdev->subsystem_vendor ==
13210                              PCI_VENDOR_ID_ARIMA) &&
13211                             (tp->pdev->subsystem_device == 0x205a ||
13212                              tp->pdev->subsystem_device == 0x2063))
13213                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13214                 } else {
13215                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13216                         tg3_flag_set(tp, IS_NIC);
13217                 }
13218
13219                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
13220                         tg3_flag_set(tp, ENABLE_ASF);
13221                         if (tg3_flag(tp, 5750_PLUS))
13222                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
13223                 }
13224
13225                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
13226                     tg3_flag(tp, 5750_PLUS))
13227                         tg3_flag_set(tp, ENABLE_APE);
13228
13229                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
13230                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
13231                         tg3_flag_clear(tp, WOL_CAP);
13232
13233                 if (tg3_flag(tp, WOL_CAP) &&
13234                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
13235                         tg3_flag_set(tp, WOL_ENABLE);
13236                         device_set_wakeup_enable(&tp->pdev->dev, true);
13237                 }
13238
13239                 if (cfg2 & (1 << 17))
13240                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
13241
13242                 /* SerDes signal pre-emphasis in register 0x590 is
13243                  * set by the bootcode if bit 18 is set. */
13244                 if (cfg2 & (1 << 18))
13245                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
13246
13247                 if ((tg3_flag(tp, 57765_PLUS) ||
13248                      (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13249                       GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
13250                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
13251                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
13252
13253                 if (tg3_flag(tp, PCI_EXPRESS) &&
13254                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13255                     !tg3_flag(tp, 57765_PLUS)) {
13256                         u32 cfg3;
13257
13258                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13259                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
13260                                 tg3_flag_set(tp, ASPM_WORKAROUND);
13261                 }
13262
13263                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
13264                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
13265                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13266                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13267                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13268                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
13269         }
13270 done:
13271         if (tg3_flag(tp, WOL_CAP))
13272                 device_set_wakeup_enable(&tp->pdev->dev,
13273                                          tg3_flag(tp, WOL_ENABLE));
13274         else
13275                 device_set_wakeup_capable(&tp->pdev->dev, false);
13276 }
13277
13278 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13279 {
13280         int i;
13281         u32 val;
13282
13283         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13284         tw32(OTP_CTRL, cmd);
13285
13286         /* Wait for up to 1 ms for command to execute. */
13287         for (i = 0; i < 100; i++) {
13288                 val = tr32(OTP_STATUS);
13289                 if (val & OTP_STATUS_CMD_DONE)
13290                         break;
13291                 udelay(10);
13292         }
13293
13294         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13295 }
13296
13297 /* Read the gphy configuration from the OTP region of the chip.  The gphy
13298  * configuration is a 32-bit value that straddles the alignment boundary.
13299  * We do two 32-bit reads and then shift and merge the results.
13300  */
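/* Illustration with made-up values: thalf_otp = 0xAAAA5555 and
 * bhalf_otp = 0x3333CCCC merge to (0x5555 << 16) | 0x3333, i.e.
 * 0x55553333.
 */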
13301 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13302 {
13303         u32 bhalf_otp, thalf_otp;
13304
13305         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13306
13307         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13308                 return 0;
13309
13310         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13311
13312         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13313                 return 0;
13314
13315         thalf_otp = tr32(OTP_READ_DATA);
13316
13317         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13318
13319         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13320                 return 0;
13321
13322         bhalf_otp = tr32(OTP_READ_DATA);
13323
13324         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13325 }
13326
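/* Seed link_config with autoneg defaults advertising every supported
 * speed; SerDes parts advertise FIBRE instead of the twisted-pair
 * modes.
 */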
13327 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13328 {
13329         u32 adv = ADVERTISED_Autoneg;
13330
13331         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13332                 adv |= ADVERTISED_1000baseT_Half |
13333                        ADVERTISED_1000baseT_Full;
13334
13335         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13336                 adv |= ADVERTISED_100baseT_Half |
13337                        ADVERTISED_100baseT_Full |
13338                        ADVERTISED_10baseT_Half |
13339                        ADVERTISED_10baseT_Full |
13340                        ADVERTISED_TP;
13341         else
13342                 adv |= ADVERTISED_FIBRE;
13343
13344         tp->link_config.advertising = adv;
13345         tp->link_config.speed = SPEED_INVALID;
13346         tp->link_config.duplex = DUPLEX_INVALID;
13347         tp->link_config.autoneg = AUTONEG_ENABLE;
13348         tp->link_config.active_speed = SPEED_INVALID;
13349         tp->link_config.active_duplex = DUPLEX_INVALID;
13350         tp->link_config.orig_speed = SPEED_INVALID;
13351         tp->link_config.orig_duplex = DUPLEX_INVALID;
13352         tp->link_config.orig_autoneg = AUTONEG_INVALID;
13353 }
13354
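/* Identify the PHY.  Precedence: an MDIO read of the ID registers
 * (skipped when ASF/APE firmware owns the PHY), then the ID cached
 * from NIC SRAM, then the subsystem ID table.
 */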
13355 static int __devinit tg3_phy_probe(struct tg3 *tp)
13356 {
13357         u32 hw_phy_id_1, hw_phy_id_2;
13358         u32 hw_phy_id, hw_phy_id_masked;
13359         int err;
13360
13361         /* Flow control autonegotiation is the default behavior. */
13362         tg3_flag_set(tp, PAUSE_AUTONEG);
13363         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13364
13365         if (tg3_flag(tp, USE_PHYLIB))
13366                 return tg3_phy_init(tp);
13367
13368         /* Reading the PHY ID register can conflict with ASF
13369          * firmware access to the PHY hardware.
13370          */
13371         err = 0;
13372         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13373                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13374         } else {
13375                 /* Now read the physical PHY_ID from the chip and verify
13376                  * that it is sane.  If it doesn't look good, we fall back
13377                  * to the PHY_ID found in the eeprom area and, failing
13378                  * that, to the hard-coded subsystem ID table.
13379                  */
13380                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13381                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13382
13383                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
13384                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13385                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
13386
13387                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13388         }
13389
13390         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13391                 tp->phy_id = hw_phy_id;
13392                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13393                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13394                 else
13395                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13396         } else {
13397                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13398                         /* Do nothing, phy ID already set up in
13399                          * tg3_get_eeprom_hw_cfg().
13400                          */
13401                 } else {
13402                         struct subsys_tbl_ent *p;
13403
13404                         /* No eeprom signature?  Try the hardcoded
13405                          * subsys device table.
13406                          */
13407                         p = tg3_lookup_by_subsys(tp);
13408                         if (!p)
13409                                 return -ENODEV;
13410
13411                         tp->phy_id = p->phy_id;
13412                         if (!tp->phy_id ||
13413                             tp->phy_id == TG3_PHY_ID_BCM8002)
13414                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13415                 }
13416         }
13417
13418         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13419             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13420              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13421              (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13422               tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13423              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13424               tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13425                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13426
13427         tg3_phy_init_link_config(tp);
13428
13429         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13430             !tg3_flag(tp, ENABLE_APE) &&
13431             !tg3_flag(tp, ENABLE_ASF)) {
13432                 u32 bmsr, dummy;
13433
13434                 tg3_readphy(tp, MII_BMSR, &bmsr);
13435                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13436                     (bmsr & BMSR_LSTATUS))
13437                         goto skip_phy_reset;
13438
13439                 err = tg3_phy_reset(tp);
13440                 if (err)
13441                         return err;
13442
13443                 tg3_phy_set_wirespeed(tp);
13444
13445                 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
13446                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13447                                             tp->link_config.flowctrl);
13448
13449                         tg3_writephy(tp, MII_BMCR,
13450                                      BMCR_ANENABLE | BMCR_ANRESTART);
13451                 }
13452         }
13453
13454 skip_phy_reset:
13455         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13456                 err = tg3_init_5401phy_dsp(tp);
13457                 if (err)
13458                         return err;
13459
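                /* Note: the DSP init sequence is issued a second time here. */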
13460                 err = tg3_init_5401phy_dsp(tp);
13461         }
13462
13463         return err;
13464 }
13465
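/* Parse the PCI VPD read-only section.  The board part number comes
 * from the PN keyword; on boards whose MFR ID reads "1028" (Dell),
 * the V0 keyword seeds fw_ver first.  Without usable VPD, fall back
 * to a name derived from the PCI device ID.
 */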
13466 static void __devinit tg3_read_vpd(struct tg3 *tp)
13467 {
13468         u8 *vpd_data;
13469         unsigned int block_end, rosize, len;
13470         u32 vpdlen;
13471         int j, i = 0;
13472
13473         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13474         if (!vpd_data)
13475                 goto out_no_vpd;
13476
13477         i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
13478         if (i < 0)
13479                 goto out_not_found;
13480
13481         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13482         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13483         i += PCI_VPD_LRDT_TAG_SIZE;
13484
13485         if (block_end > vpdlen)
13486                 goto out_not_found;
13487
13488         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13489                                       PCI_VPD_RO_KEYWORD_MFR_ID);
13490         if (j > 0) {
13491                 len = pci_vpd_info_field_size(&vpd_data[j]);
13492
13493                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13494                 if (j + len > block_end || len != 4 ||
13495                     memcmp(&vpd_data[j], "1028", 4))
13496                         goto partno;
13497
13498                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13499                                               PCI_VPD_RO_KEYWORD_VENDOR0);
13500                 if (j < 0)
13501                         goto partno;
13502
13503                 len = pci_vpd_info_field_size(&vpd_data[j]);
13504
13505                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13506                 if (j + len > block_end)
13507                         goto partno;
13508
13509                 memcpy(tp->fw_ver, &vpd_data[j], len);
13510                 strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
13511         }
13512
13513 partno:
13514         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13515                                       PCI_VPD_RO_KEYWORD_PARTNO);
13516         if (i < 0)
13517                 goto out_not_found;
13518
13519         len = pci_vpd_info_field_size(&vpd_data[i]);
13520
13521         i += PCI_VPD_INFO_FLD_HDR_SIZE;
13522         if (len > TG3_BPN_SIZE ||
13523             (len + i) > vpdlen)
13524                 goto out_not_found;
13525
13526         memcpy(tp->board_part_number, &vpd_data[i], len);
13527
13528 out_not_found:
13529         kfree(vpd_data);
13530         if (tp->board_part_number[0])
13531                 return;
13532
13533 out_no_vpd:
13534         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13535                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13536                         strcpy(tp->board_part_number, "BCM5717");
13537                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13538                         strcpy(tp->board_part_number, "BCM5718");
13539                 else
13540                         goto nomatch;
13541         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13542                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13543                         strcpy(tp->board_part_number, "BCM57780");
13544                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13545                         strcpy(tp->board_part_number, "BCM57760");
13546                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13547                         strcpy(tp->board_part_number, "BCM57790");
13548                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13549                         strcpy(tp->board_part_number, "BCM57788");
13550                 else
13551                         goto nomatch;
13552         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13553                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13554                         strcpy(tp->board_part_number, "BCM57761");
13555                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13556                         strcpy(tp->board_part_number, "BCM57765");
13557                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13558                         strcpy(tp->board_part_number, "BCM57781");
13559                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13560                         strcpy(tp->board_part_number, "BCM57785");
13561                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13562                         strcpy(tp->board_part_number, "BCM57791");
13563                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13564                         strcpy(tp->board_part_number, "BCM57795");
13565                 else
13566                         goto nomatch;
13567         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
13568                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
13569                         strcpy(tp->board_part_number, "BCM57762");
13570                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
13571                         strcpy(tp->board_part_number, "BCM57766");
13572                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
13573                         strcpy(tp->board_part_number, "BCM57782");
13574                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
13575                         strcpy(tp->board_part_number, "BCM57786");
13576                 else
13577                         goto nomatch;
13578         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13579                 strcpy(tp->board_part_number, "BCM95906");
13580         } else {
13581 nomatch:
13582                 strcpy(tp->board_part_number, "none");
13583         }
13584 }
13585
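/* A firmware image is treated as valid when its first word carries
 * the 0x0c000000 signature bits and its second word is zero.
 */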
13586 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13587 {
13588         u32 val;
13589
13590         if (tg3_nvram_read(tp, offset, &val) ||
13591             (val & 0xfc000000) != 0x0c000000 ||
13592             tg3_nvram_read(tp, offset + 4, &val) ||
13593             val != 0)
13594                 return 0;
13595
13596         return 1;
13597 }
13598
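/* Append the bootcode version to fw_ver: either a 16-byte string
 * located via the image header (newer layouts) or a major/minor pair
 * from the TG3_NVM_PTREV_BCVER word.
 */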
13599 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13600 {
13601         u32 val, offset, start, ver_offset;
13602         int i, dst_off;
13603         bool newver = false;
13604
13605         if (tg3_nvram_read(tp, 0xc, &offset) ||
13606             tg3_nvram_read(tp, 0x4, &start))
13607                 return;
13608
13609         offset = tg3_nvram_logical_addr(tp, offset);
13610
13611         if (tg3_nvram_read(tp, offset, &val))
13612                 return;
13613
13614         if ((val & 0xfc000000) == 0x0c000000) {
13615                 if (tg3_nvram_read(tp, offset + 4, &val))
13616                         return;
13617
13618                 if (val == 0)
13619                         newver = true;
13620         }
13621
13622         dst_off = strlen(tp->fw_ver);
13623
13624         if (newver) {
13625                 if (TG3_VER_SIZE - dst_off < 16 ||
13626                     tg3_nvram_read(tp, offset + 8, &ver_offset))
13627                         return;
13628
13629                 offset = offset + ver_offset - start;
13630                 for (i = 0; i < 16; i += 4) {
13631                         __be32 v;
13632                         if (tg3_nvram_read_be32(tp, offset + i, &v))
13633                                 return;
13634
13635                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13636                 }
13637         } else {
13638                 u32 major, minor;
13639
13640                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13641                         return;
13642
13643                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13644                         TG3_NVM_BCVER_MAJSFT;
13645                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13646                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13647                          "v%d.%02d", major, minor);
13648         }
13649 }
13650
13651 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13652 {
13653         u32 val, major, minor;
13654
13655         /* Use native endian representation */
13656         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13657                 return;
13658
13659         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13660                 TG3_NVM_HWSB_CFG1_MAJSFT;
13661         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13662                 TG3_NVM_HWSB_CFG1_MINSFT;
13663
13664         snprintf(&tp->fw_ver[0], TG3_VER_SIZE, "sb v%d.%02d", major, minor);
13665 }
13666
13667 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13668 {
13669         u32 offset, major, minor, build;
13670
13671         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13672
13673         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13674                 return;
13675
13676         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13677         case TG3_EEPROM_SB_REVISION_0:
13678                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13679                 break;
13680         case TG3_EEPROM_SB_REVISION_2:
13681                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13682                 break;
13683         case TG3_EEPROM_SB_REVISION_3:
13684                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13685                 break;
13686         case TG3_EEPROM_SB_REVISION_4:
13687                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13688                 break;
13689         case TG3_EEPROM_SB_REVISION_5:
13690                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13691                 break;
13692         case TG3_EEPROM_SB_REVISION_6:
13693                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13694                 break;
13695         default:
13696                 return;
13697         }
13698
13699         if (tg3_nvram_read(tp, offset, &val))
13700                 return;
13701
13702         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13703                 TG3_EEPROM_SB_EDH_BLD_SHFT;
13704         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13705                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13706         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
13707
13708         if (minor > 99 || build > 26)
13709                 return;
13710
13711         offset = strlen(tp->fw_ver);
13712         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13713                  " v%d.%02d", major, minor);
13714
13715         if (build > 0) {
13716                 offset = strlen(tp->fw_ver);
13717                 if (offset < TG3_VER_SIZE - 1)
13718                         tp->fw_ver[offset] = 'a' + build - 1;
13719         }
13720 }
13721
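/* Find the ASF initialization entry in the NVRAM directory and append
 * the management firmware version (up to 16 bytes) to fw_ver.
 */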
13722 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13723 {
13724         u32 val, offset, start;
13725         int i, vlen;
13726
13727         for (offset = TG3_NVM_DIR_START;
13728              offset < TG3_NVM_DIR_END;
13729              offset += TG3_NVM_DIRENT_SIZE) {
13730                 if (tg3_nvram_read(tp, offset, &val))
13731                         return;
13732
13733                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13734                         break;
13735         }
13736
13737         if (offset == TG3_NVM_DIR_END)
13738                 return;
13739
13740         if (!tg3_flag(tp, 5705_PLUS))
13741                 start = 0x08000000;
13742         else if (tg3_nvram_read(tp, offset - 4, &start))
13743                 return;
13744
13745         if (tg3_nvram_read(tp, offset + 4, &offset) ||
13746             !tg3_fw_img_is_valid(tp, offset) ||
13747             tg3_nvram_read(tp, offset + 8, &val))
13748                 return;
13749
13750         offset += val - start;
13751
13752         vlen = strlen(tp->fw_ver);
13753
13754         tp->fw_ver[vlen++] = ',';
13755         tp->fw_ver[vlen++] = ' ';
13756
13757         for (i = 0; i < 4; i++) {
13758                 __be32 v;
13759                 if (tg3_nvram_read_be32(tp, offset, &v))
13760                         return;
13761
13762                 offset += sizeof(v);
13763
13764                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13765                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13766                         break;
13767                 }
13768
13769                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13770                 vlen += sizeof(v);
13771         }
13772 }
13773
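/* Query the APE for its firmware version once it reports ready; the
 * NCSI feature bit decides whether the string is tagged "NCSI" or
 * "DASH".
 */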
13774 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13775 {
13776         int vlen;
13777         u32 apedata;
13778         char *fwtype;
13779
13780         if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13781                 return;
13782
13783         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13784         if (apedata != APE_SEG_SIG_MAGIC)
13785                 return;
13786
13787         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13788         if (!(apedata & APE_FW_STATUS_READY))
13789                 return;
13790
13791         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13792
13793         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13794                 tg3_flag_set(tp, APE_HAS_NCSI);
13795                 fwtype = "NCSI";
13796         } else {
13797                 fwtype = "DASH";
13798         }
13799
13800         vlen = strlen(tp->fw_ver);
13801
13802         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13803                  fwtype,
13804                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13805                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13806                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13807                  (apedata & APE_FW_VERSION_BLDMSK));
13808 }
13809
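/* Assemble the final tp->fw_ver string.  VPD may already have seeded
 * it; NVRAM-less parts are simply tagged "sb".  Otherwise the NVRAM
 * magic selects the bootcode, self-boot, or hardware self-boot
 * reader, and a management firmware version (APE DASH/NCSI, or
 * legacy ASF) is appended when no VPD version was present.
 */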
13810 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13811 {
13812         u32 val;
13813         bool vpd_vers = false;
13814
13815         if (tp->fw_ver[0] != 0)
13816                 vpd_vers = true;
13817
13818         if (tg3_flag(tp, NO_NVRAM)) {
13819                 strcat(tp->fw_ver, "sb");
13820                 return;
13821         }
13822
13823         if (tg3_nvram_read(tp, 0, &val))
13824                 return;
13825
13826         if (val == TG3_EEPROM_MAGIC)
13827                 tg3_read_bc_ver(tp);
13828         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13829                 tg3_read_sb_ver(tp, val);
13830         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13831                 tg3_read_hwsb_ver(tp);
13832         else
13833                 return;
13834
13835         if (vpd_vers)
13836                 goto done;
13837
13838         if (tg3_flag(tp, ENABLE_APE)) {
13839                 if (tg3_flag(tp, ENABLE_ASF))
13840                         tg3_read_dash_ver(tp);
13841         } else if (tg3_flag(tp, ENABLE_ASF)) {
13842                 tg3_read_mgmtfw_ver(tp);
13843         }
13844
13845 done:
13846         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13847 }
13848
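/* RX return ring size: largest on chips with the large production
 * ring, the 5700 size on jumbo-capable non-5780 chips, and the
 * smaller 5705 size everywhere else.
 */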
13849 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13850 {
13851         if (tg3_flag(tp, LRG_PROD_RING_CAP))
13852                 return TG3_RX_RET_MAX_SIZE_5717;
13853         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13854                 return TG3_RX_RET_MAX_SIZE_5700;
13855         else
13856                 return TG3_RX_RET_MAX_SIZE_5705;
13857 }
13858
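/* Chipsets known to reorder writes posted to the mailbox registers;
 * checked with pci_dev_present() in tg3_get_invariants() to decide
 * whether MBOX_WRITE_REORDER is needed.
 */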
13859 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13860         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13861         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13862         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13863         { },
13864 };
13865
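/* On dual-port devices the two ports are functions of the same PCI
 * slot; scan the slot (devfn & ~7) for the sibling function.  Falls
 * back to tp->pdev itself for single-port 5704 configurations.
 */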
13866 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
13867 {
13868         struct pci_dev *peer;
13869         unsigned int func, devnr = tp->pdev->devfn & ~7;
13870
13871         for (func = 0; func < 8; func++) {
13872                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
13873                 if (peer && peer != tp->pdev)
13874                         break;
13875                 pci_dev_put(peer);
13876         }
13877         /* 5704 can be configured in single-port mode; set peer to
13878          * tp->pdev in that case.
13879          */
13880         if (!peer) {
13881                 peer = tp->pdev;
13882                 return peer;
13883         }
13884
13885         /*
13886          * We don't need to keep the refcount elevated; there's no way
13887          * to remove one half of this device without removing the other.
13888          */
13889         pci_dev_put(peer);
13890
13891         return peer;
13892 }
13893
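/* One-time probe-path discovery of chip identity, bus type, and the
 * long tail of per-ASIC workarounds.  This must run before the first
 * fast-path MMIO access, since it also selects the register access
 * methods (tp->read32/tp->write32 and friends).
 */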
13894 static int __devinit tg3_get_invariants(struct tg3 *tp)
13895 {
13896         u32 misc_ctrl_reg;
13897         u32 pci_state_reg, grc_misc_cfg;
13898         u32 val;
13899         u16 pci_cmd;
13900         int err;
13901
13902         /* Force memory write invalidate off.  If we leave it on,
13903          * then on 5700_BX chips we have to enable a workaround.
13904          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13905          * to match the cacheline size.  The Broadcom driver has this
13906          * workaround but turns MWI off all the time, so it never uses
13907          * it.  This seems to suggest that the workaround is insufficient.
13908          */
13909         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13910         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13911         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13912
13913         /* Important! -- Make sure register accesses are byteswapped
13914          * correctly.  Also, for those chips that require it, make
13915          * sure that indirect register accesses are enabled before
13916          * the first operation.
13917          */
13918         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13919                               &misc_ctrl_reg);
13920         tp->misc_host_ctrl |= (misc_ctrl_reg &
13921                                MISC_HOST_CTRL_CHIPREV);
13922         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13923                                tp->misc_host_ctrl);
13924
13925         tp->pci_chip_rev_id = (misc_ctrl_reg >>
13926                                MISC_HOST_CTRL_CHIPREV_SHIFT);
13927         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13928                 u32 prod_id_asic_rev;
13929
13930                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13931                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13932                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13933                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13934                         pci_read_config_dword(tp->pdev,
13935                                               TG3PCI_GEN2_PRODID_ASICREV,
13936                                               &prod_id_asic_rev);
13937                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13938                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13939                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13940                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13941                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13942                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
13943                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
13944                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
13945                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
13946                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
13947                         pci_read_config_dword(tp->pdev,
13948                                               TG3PCI_GEN15_PRODID_ASICREV,
13949                                               &prod_id_asic_rev);
13950                 else
13951                         pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13952                                               &prod_id_asic_rev);
13953
13954                 tp->pci_chip_rev_id = prod_id_asic_rev;
13955         }
13956
13957         /* Wrong chip ID in 5752 A0. This code can be removed later
13958          * as A0 is not in production.
13959          */
13960         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13961                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13962
13963         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13964          * we need to disable memory and use only config cycles
13965          * to access the registers. The 5702/03 chips
13966          * can mistakenly decode the special cycles from the
13967          * ICH chipsets as memory write cycles, causing corruption
13968          * of register and memory space. Only certain ICH bridges
13969          * will drive special cycles with non-zero data during the
13970          * address phase which can fall within the 5703's address
13971          * range. This is not an ICH bug as the PCI spec allows
13972          * non-zero address during special cycles. However, only
13973          * these ICH bridges are known to drive non-zero addresses
13974          * during special cycles.
13975          *
13976          * Since special cycles do not cross PCI bridges, we only
13977          * enable this workaround if the 5703 is on the secondary
13978          * bus of these ICH bridges.
13979          */
13980         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13981             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13982                 static struct tg3_dev_id {
13983                         u32     vendor;
13984                         u32     device;
13985                         u32     rev;
13986                 } ich_chipsets[] = {
13987                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13988                           PCI_ANY_ID },
13989                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13990                           PCI_ANY_ID },
13991                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13992                           0xa },
13993                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13994                           PCI_ANY_ID },
13995                         { },
13996                 };
13997                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13998                 struct pci_dev *bridge = NULL;
13999
14000                 while (pci_id->vendor != 0) {
14001                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
14002                                                 bridge);
14003                         if (!bridge) {
14004                                 pci_id++;
14005                                 continue;
14006                         }
14007                         if (pci_id->rev != PCI_ANY_ID) {
14008                                 if (bridge->revision > pci_id->rev)
14009                                         continue;
14010                         }
14011                         if (bridge->subordinate &&
14012                             (bridge->subordinate->number ==
14013                              tp->pdev->bus->number)) {
14014                                 tg3_flag_set(tp, ICH_WORKAROUND);
14015                                 pci_dev_put(bridge);
14016                                 break;
14017                         }
14018                 }
14019         }
14020
14021         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14022                 static struct tg3_dev_id {
14023                         u32     vendor;
14024                         u32     device;
14025                 } bridge_chipsets[] = {
14026                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
14027                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
14028                         { },
14029                 };
14030                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
14031                 struct pci_dev *bridge = NULL;
14032
14033                 while (pci_id->vendor != 0) {
14034                         bridge = pci_get_device(pci_id->vendor,
14035                                                 pci_id->device,
14036                                                 bridge);
14037                         if (!bridge) {
14038                                 pci_id++;
14039                                 continue;
14040                         }
14041                         if (bridge->subordinate &&
14042                             (bridge->subordinate->number <=
14043                              tp->pdev->bus->number) &&
14044                             (bridge->subordinate->subordinate >=
14045                              tp->pdev->bus->number)) {
14046                                 tg3_flag_set(tp, 5701_DMA_BUG);
14047                                 pci_dev_put(bridge);
14048                                 break;
14049                         }
14050                 }
14051         }
14052
14053         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
14054          * DMA addresses wider than 40 bits.  This bridge may have additional
14055          * 57xx devices behind it in some 4-port NIC designs, for example.
14056          * Any tg3 device found behind the bridge will also need the 40-bit
14057          * DMA workaround.
14058          */
14059         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
14060             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14061                 tg3_flag_set(tp, 5780_CLASS);
14062                 tg3_flag_set(tp, 40BIT_DMA_BUG);
14063                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
14064         } else {
14065                 struct pci_dev *bridge = NULL;
14066
14067                 do {
14068                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
14069                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
14070                                                 bridge);
14071                         if (bridge && bridge->subordinate &&
14072                             (bridge->subordinate->number <=
14073                              tp->pdev->bus->number) &&
14074                             (bridge->subordinate->subordinate >=
14075                              tp->pdev->bus->number)) {
14076                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
14077                                 pci_dev_put(bridge);
14078                                 break;
14079                         }
14080                 } while (bridge);
14081         }
14082
14083         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14084             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14085                 tp->pdev_peer = tg3_find_peer(tp);
14086
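        /* The generation flags below are cumulative: 5717_PLUS and
         * 57765_CLASS imply 57765_PLUS, which implies 5755_PLUS, which
         * implies 5750_PLUS, which implies 5705_PLUS.  Later checks can
         * therefore test the oldest generation that gained a feature.
         */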
14087         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14088             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14089             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14090                 tg3_flag_set(tp, 5717_PLUS);
14091
14092         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
14093             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
14094                 tg3_flag_set(tp, 57765_CLASS);
14095
14096         if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
14097                 tg3_flag_set(tp, 57765_PLUS);
14098
14099         /* Intentionally exclude ASIC_REV_5906 */
14100         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14101             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14102             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14103             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14104             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14105             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14106             tg3_flag(tp, 57765_PLUS))
14107                 tg3_flag_set(tp, 5755_PLUS);
14108
14109         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14110             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14111             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
14112             tg3_flag(tp, 5755_PLUS) ||
14113             tg3_flag(tp, 5780_CLASS))
14114                 tg3_flag_set(tp, 5750_PLUS);
14115
14116         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14117             tg3_flag(tp, 5750_PLUS))
14118                 tg3_flag_set(tp, 5705_PLUS);
14119
14120         /* Determine TSO capabilities */
14121         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
14122                 ; /* Do nothing. HW bug. */
14123         else if (tg3_flag(tp, 57765_PLUS))
14124                 tg3_flag_set(tp, HW_TSO_3);
14125         else if (tg3_flag(tp, 5755_PLUS) ||
14126                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14127                 tg3_flag_set(tp, HW_TSO_2);
14128         else if (tg3_flag(tp, 5750_PLUS)) {
14129                 tg3_flag_set(tp, HW_TSO_1);
14130                 tg3_flag_set(tp, TSO_BUG);
14131                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
14132                     tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
14133                         tg3_flag_clear(tp, TSO_BUG);
14134         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14135                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14136                    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
14137                 tg3_flag_set(tp, TSO_BUG);
14138                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
14139                         tp->fw_needed = FIRMWARE_TG3TSO5;
14140                 else
14141                         tp->fw_needed = FIRMWARE_TG3TSO;
14142         }
14143
14144         /* Selectively allow TSO based on operating conditions */
14145         if (tg3_flag(tp, HW_TSO_1) ||
14146             tg3_flag(tp, HW_TSO_2) ||
14147             tg3_flag(tp, HW_TSO_3) ||
14148             tp->fw_needed) {
14149                 /* For firmware TSO, assume ASF is disabled.
14150                  * We'll disable TSO later if we discover ASF
14151                  * is enabled in tg3_get_eeprom_hw_cfg().
14152                  */
14153                 tg3_flag_set(tp, TSO_CAPABLE);
14154         } else {
14155                 tg3_flag_clear(tp, TSO_CAPABLE);
14156                 tg3_flag_clear(tp, TSO_BUG);
14157                 tp->fw_needed = NULL;
14158         }
14159
14160         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14161                 tp->fw_needed = FIRMWARE_TG3;
14162
14163         tp->irq_max = 1;
14164
14165         if (tg3_flag(tp, 5750_PLUS)) {
14166                 tg3_flag_set(tp, SUPPORT_MSI);
14167                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14168                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14169                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14170                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14171                      tp->pdev_peer == tp->pdev))
14172                         tg3_flag_clear(tp, SUPPORT_MSI);
14173
14174                 if (tg3_flag(tp, 5755_PLUS) ||
14175                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14176                         tg3_flag_set(tp, 1SHOT_MSI);
14177                 }
14178
14179                 if (tg3_flag(tp, 57765_PLUS)) {
14180                         tg3_flag_set(tp, SUPPORT_MSIX);
14181                         tp->irq_max = TG3_IRQ_MAX_VECS;
14182                         tg3_rss_init_dflt_indir_tbl(tp);
14183                 }
14184         }
14185
14186         if (tg3_flag(tp, 5755_PLUS))
14187                 tg3_flag_set(tp, SHORT_DMA_BUG);
14188
14189         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
14190                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
14191         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
14192                 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
14193
14194         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14195             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14196             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14197                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
14198
14199         if (tg3_flag(tp, 57765_PLUS) &&
14200             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
14201                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
14202
14203         if (!tg3_flag(tp, 5705_PLUS) ||
14204             tg3_flag(tp, 5780_CLASS) ||
14205             tg3_flag(tp, USE_JUMBO_BDFLAG))
14206                 tg3_flag_set(tp, JUMBO_CAPABLE);
14207
14208         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14209                               &pci_state_reg);
14210
14211         if (pci_is_pcie(tp->pdev)) {
14212                 u16 lnkctl;
14213
14214                 tg3_flag_set(tp, PCI_EXPRESS);
14215
14216                 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) {
14217                         int readrq = pcie_get_readrq(tp->pdev);
14218                         if (readrq > 2048)
14219                                 pcie_set_readrq(tp->pdev, 2048);
14220                 }
14221
14222                 pci_read_config_word(tp->pdev,
14223                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
14224                                      &lnkctl);
14225                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
14226                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14227                             ASIC_REV_5906) {
14228                                 tg3_flag_clear(tp, HW_TSO_2);
14229                                 tg3_flag_clear(tp, TSO_CAPABLE);
14230                         }
14231                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14232                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14233                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14234                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
14235                                 tg3_flag_set(tp, CLKREQ_BUG);
14236                 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
14237                         tg3_flag_set(tp, L1PLLPD_EN);
14238                 }
14239         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
14240                 /* BCM5785 devices are effectively PCIe devices, and should
14241                  * follow PCIe codepaths, but do not have a PCIe capabilities
14242                  * section.
14243                  */
14244                 tg3_flag_set(tp, PCI_EXPRESS);
14245         } else if (!tg3_flag(tp, 5705_PLUS) ||
14246                    tg3_flag(tp, 5780_CLASS)) {
14247                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14248                 if (!tp->pcix_cap) {
14249                         dev_err(&tp->pdev->dev,
14250                                 "Cannot find PCI-X capability, aborting\n");
14251                         return -EIO;
14252                 }
14253
14254                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14255                         tg3_flag_set(tp, PCIX_MODE);
14256         }
14257
14258         /* If we have an AMD 762 or VIA K8T800 chipset, write
14259          * reordering to the mailbox registers done by the host
14260          * controller can cause major trouble.  We read back after
14261          * every mailbox register write to force the writes to be
14262          * posted to the chip in order.
14263          */
14264         if (pci_dev_present(tg3_write_reorder_chipsets) &&
14265             !tg3_flag(tp, PCI_EXPRESS))
14266                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
14267
14268         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14269                              &tp->pci_cacheline_sz);
14270         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14271                              &tp->pci_lat_timer);
14272         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14273             tp->pci_lat_timer < 64) {
14274                 tp->pci_lat_timer = 64;
14275                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14276                                       tp->pci_lat_timer);
14277         }
14278
14279         /* Important! -- It is critical that the PCI-X hw workaround
14280          * situation is decided before the first MMIO register access.
14281          */
14282         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14283                 /* 5700 BX chips need to have their TX producer index
14284                  * mailboxes written twice to work around a bug.
14285                  */
14286                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
14287
14288                 /* If we are in PCI-X mode, enable register write workaround.
14289                  *
14290                  * The workaround is to use indirect register accesses
14291                  * for all chip writes not to mailbox registers.
14292                  */
14293                 if (tg3_flag(tp, PCIX_MODE)) {
14294                         u32 pm_reg;
14295
14296                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14297
14298                         /* The chip can have its power management PCI config
14299                          * space registers clobbered due to this bug.
14300                          * So explicitly force the chip into D0 here.
14301                          */
14302                         pci_read_config_dword(tp->pdev,
14303                                               tp->pm_cap + PCI_PM_CTRL,
14304                                               &pm_reg);
14305                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14306                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14307                         pci_write_config_dword(tp->pdev,
14308                                                tp->pm_cap + PCI_PM_CTRL,
14309                                                pm_reg);
14310
14311                         /* Also, force SERR#/PERR# in PCI command. */
14312                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14313                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14314                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14315                 }
14316         }
14317
14318         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14319                 tg3_flag_set(tp, PCI_HIGH_SPEED);
14320         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14321                 tg3_flag_set(tp, PCI_32BIT);
14322
14323         /* Chip-specific fixup from Broadcom driver */
14324         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14325             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14326                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14327                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14328         }
14329
14330         /* Default fast path register access methods */
14331         tp->read32 = tg3_read32;
14332         tp->write32 = tg3_write32;
14333         tp->read32_mbox = tg3_read32;
14334         tp->write32_mbox = tg3_write32;
14335         tp->write32_tx_mbox = tg3_write32;
14336         tp->write32_rx_mbox = tg3_write32;
14337
14338         /* Various workaround register access methods */
14339         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14340                 tp->write32 = tg3_write_indirect_reg32;
14341         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14342                  (tg3_flag(tp, PCI_EXPRESS) &&
14343                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14344                 /*
14345                  * Back to back register writes can cause problems on these
14346                  * chips, the workaround is to read back all reg writes
14347                  * except those to mailbox regs.
14348                  *
14349                  * See tg3_write_indirect_reg32().
14350                  */
14351                 tp->write32 = tg3_write_flush_reg32;
14352         }
14353
14354         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14355                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14356                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14357                         tp->write32_rx_mbox = tg3_write_flush_reg32;
14358         }
14359
14360         if (tg3_flag(tp, ICH_WORKAROUND)) {
14361                 tp->read32 = tg3_read_indirect_reg32;
14362                 tp->write32 = tg3_write_indirect_reg32;
14363                 tp->read32_mbox = tg3_read_indirect_mbox;
14364                 tp->write32_mbox = tg3_write_indirect_mbox;
14365                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14366                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14367
14368                 iounmap(tp->regs);
14369                 tp->regs = NULL;
14370
14371                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14372                 pci_cmd &= ~PCI_COMMAND_MEMORY;
14373                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14374         }
14375         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14376                 tp->read32_mbox = tg3_read32_mbox_5906;
14377                 tp->write32_mbox = tg3_write32_mbox_5906;
14378                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14379                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14380         }
14381
14382         if (tp->write32 == tg3_write_indirect_reg32 ||
14383             (tg3_flag(tp, PCIX_MODE) &&
14384              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14385               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14386                 tg3_flag_set(tp, SRAM_USE_CONFIG);
14387
14388         /* The memory arbiter has to be enabled in order for SRAM accesses
14389          * to succeed.  Normally on powerup the tg3 chip firmware will make
14390          * sure it is enabled, but other entities such as system netboot
14391          * code might disable it.
14392          */
14393         val = tr32(MEMARB_MODE);
14394         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14395
14396         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14397         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14398             tg3_flag(tp, 5780_CLASS)) {
14399                 if (tg3_flag(tp, PCIX_MODE)) {
14400                         pci_read_config_dword(tp->pdev,
14401                                               tp->pcix_cap + PCI_X_STATUS,
14402                                               &val);
14403                         tp->pci_fn = val & 0x7;
14404                 }
14405         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14406                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14407                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14408                     NIC_SRAM_CPMUSTAT_SIG) {
14409                         tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
14410                         tp->pci_fn = tp->pci_fn ? 1 : 0;
14411                 }
14412         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14413                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
14414                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14415                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14416                     NIC_SRAM_CPMUSTAT_SIG) {
14417                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
14418                                      TG3_CPMU_STATUS_FSHFT_5719;
14419                 }
14420         }
14421
14422         /* Get eeprom hw config before calling tg3_set_power_state().
14423          * In particular, the TG3_FLAG_IS_NIC flag must be
14424          * determined before calling tg3_set_power_state() so that
14425          * we know whether or not to switch out of Vaux power.
14426          * When the flag is set, it means that GPIO1 is used for eeprom
14427          * write protect and also implies that it is a LOM where GPIOs
14428          * are not used to switch power.
14429          */
14430         tg3_get_eeprom_hw_cfg(tp);
14431
14432         if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
14433                 tg3_flag_clear(tp, TSO_CAPABLE);
14434                 tg3_flag_clear(tp, TSO_BUG);
14435                 tp->fw_needed = NULL;
14436         }
14437
14438         if (tg3_flag(tp, ENABLE_APE)) {
14439                 /* Allow reads and writes to the
14440                  * APE register and memory space.
14441                  */
14442                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14443                                  PCISTATE_ALLOW_APE_SHMEM_WR |
14444                                  PCISTATE_ALLOW_APE_PSPACE_WR;
14445                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14446                                        pci_state_reg);
14447
14448                 tg3_ape_lock_init(tp);
14449         }
14450
14451         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14452             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14453             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14454             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14455             tg3_flag(tp, 57765_PLUS))
14456                 tg3_flag_set(tp, CPMU_PRESENT);
14457
14458         /* Set up tp->grc_local_ctrl before calling
14459          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
14460          * will bring 5700's external PHY out of reset.
14461          * It is also used as eeprom write protect on LOMs.
14462          */
14463         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14464         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14465             tg3_flag(tp, EEPROM_WRITE_PROT))
14466                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14467                                        GRC_LCLCTRL_GPIO_OUTPUT1);
14468         /* Unused GPIO3 must be driven as output on 5752 because there
14469          * are no pull-up resistors on unused GPIO pins.
14470          */
14471         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14472                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14473
14474         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14475             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14476             tg3_flag(tp, 57765_CLASS))
14477                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14478
14479         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14480             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14481                 /* Turn off the debug UART. */
14482                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14483                 if (tg3_flag(tp, IS_NIC))
14484                         /* Keep VMain power. */
14485                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14486                                               GRC_LCLCTRL_GPIO_OUTPUT0;
14487         }
14488
14489         /* Switch out of Vaux if it is a NIC */
14490         tg3_pwrsrc_switch_to_vmain(tp);
14491
14492         /* Derive initial jumbo mode from MTU assigned in
14493          * ether_setup() via the alloc_etherdev() call
14494          */
14495         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14496                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14497
14498         /* Determine WakeOnLan speed to use. */
14499         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14500             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14501             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14502             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14503                 tg3_flag_clear(tp, WOL_SPEED_100MB);
14504         } else {
14505                 tg3_flag_set(tp, WOL_SPEED_100MB);
14506         }
14507
14508         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14509                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14510
14511         /* A few boards don't want Ethernet@WireSpeed phy feature */
14512         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14513             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14514              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14515              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14516             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14517             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14518                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14519
14520         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14521             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14522                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14523         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14524                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14525
14526         if (tg3_flag(tp, 5705_PLUS) &&
14527             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14528             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14529             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14530             !tg3_flag(tp, 57765_PLUS)) {
14531                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14532                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14533                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14534                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14535                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14536                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14537                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14538                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14539                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14540                 } else
14541                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14542         }
14543
14544         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14545             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14546                 tp->phy_otp = tg3_read_otp_phycfg(tp);
14547                 if (tp->phy_otp == 0)
14548                         tp->phy_otp = TG3_OTP_DEFAULT;
14549         }
14550
14551         if (tg3_flag(tp, CPMU_PRESENT))
14552                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14553         else
14554                 tp->mi_mode = MAC_MI_MODE_BASE;
14555
14556         tp->coalesce_mode = 0;
14557         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14558             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14559                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14560
14561         /* Set these bits to enable statistics workaround. */
14562         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14563             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14564             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14565                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14566                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14567         }
14568
14569         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14570             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14571                 tg3_flag_set(tp, USE_PHYLIB);
14572
14573         err = tg3_mdio_init(tp);
14574         if (err)
14575                 return err;
14576
14577         /* Initialize data/descriptor byte/word swapping. */
14578         val = tr32(GRC_MODE);
14579         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14580                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14581                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
14582                         GRC_MODE_B2HRX_ENABLE |
14583                         GRC_MODE_HTX2B_ENABLE |
14584                         GRC_MODE_HOST_STACKUP);
14585         else
14586                 val &= GRC_MODE_HOST_STACKUP;
14587
14588         tw32(GRC_MODE, val | tp->grc_mode);
14589
14590         tg3_switch_clocks(tp);
14591
14592         /* Clear this out for sanity. */
14593         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14594
14595         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14596                               &pci_state_reg);
14597         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14598             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14599                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14600
14601                 if (chiprevid == CHIPREV_ID_5701_A0 ||
14602                     chiprevid == CHIPREV_ID_5701_B0 ||
14603                     chiprevid == CHIPREV_ID_5701_B2 ||
14604                     chiprevid == CHIPREV_ID_5701_B5) {
14605                         void __iomem *sram_base;
14606
14607                         /* Write some dummy words into the SRAM status block
14608                          * area and see if it reads back correctly.  If the readback
14609                          * value is bad, force-enable the PCI-X workaround.
14610                          */
14611                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14612
14613                         writel(0x00000000, sram_base);
14614                         writel(0x00000000, sram_base + 4);
14615                         writel(0xffffffff, sram_base + 4);
14616                         if (readl(sram_base) != 0x00000000)
14617                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14618                 }
14619         }
14620
14621         udelay(50);
14622         tg3_nvram_init(tp);
14623
14624         grc_misc_cfg = tr32(GRC_MISC_CFG);
14625         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14626
14627         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14628             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14629              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14630                 tg3_flag_set(tp, IS_5788);
14631
14632         if (!tg3_flag(tp, IS_5788) &&
14633             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14634                 tg3_flag_set(tp, TAGGED_STATUS);
14635         if (tg3_flag(tp, TAGGED_STATUS)) {
14636                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14637                                       HOSTCC_MODE_CLRTICK_TXBD);
14638
14639                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14640                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14641                                        tp->misc_host_ctrl);
14642         }
14643
14644         /* Preserve the APE MAC_MODE bits */
14645         if (tg3_flag(tp, ENABLE_APE))
14646                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14647         else
14648                 tp->mac_mode = 0;
14649
14650         /* these are limited to 10/100 only */
14651         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14652              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14653             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14654              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14655              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14656               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14657               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14658             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14659              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14660               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14661               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14662             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14663             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14664             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14665             (tp->phy_flags & TG3_PHYFLG_IS_FET))
14666                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14667
14668         err = tg3_phy_probe(tp);
14669         if (err) {
14670                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14671                 /* ... but do not return immediately ... */
14672                 tg3_mdio_fini(tp);
14673         }
14674
14675         tg3_read_vpd(tp);
14676         tg3_read_fw_ver(tp);
14677
14678         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14679                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14680         } else {
14681                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14682                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14683                 else
14684                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14685         }
14686
14687         /* 5700 {AX,BX} chips have a broken status block link
14688          * change bit implementation, so we must use the
14689          * status register in those cases.
14690          */
14691         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14692                 tg3_flag_set(tp, USE_LINKCHG_REG);
14693         else
14694                 tg3_flag_clear(tp, USE_LINKCHG_REG);
14695
14696         /* The led_ctrl is set during tg3_phy_probe; here we might
14697          * have to force the link status polling mechanism based
14698          * upon subsystem IDs.
14699          */
14700         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14701             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14702             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14703                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14704                 tg3_flag_set(tp, USE_LINKCHG_REG);
14705         }
14706
14707         /* For all SERDES we poll the MAC status register. */
14708         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14709                 tg3_flag_set(tp, POLL_SERDES);
14710         else
14711                 tg3_flag_clear(tp, POLL_SERDES);
14712
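        /* The default RX offset reserves NET_IP_ALIGN bytes so the IP
         * header lands on a 4-byte boundary.  5701 in PCI-X mode cannot
         * use that offset, so drop it there and, on strict-alignment
         * architectures, copy every packet by maxing out rx_copy_thresh.
         */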
14713         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
14714         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14715         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14716             tg3_flag(tp, PCIX_MODE)) {
14717                 tp->rx_offset = NET_SKB_PAD;
14718 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14719                 tp->rx_copy_thresh = ~(u16)0;
14720 #endif
14721         }
14722
14723         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14724         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14725         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14726
14727         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14728
14729         /* Increment the rx prod index on the rx std ring by at most
14730          * 8 for these chips to work around hw errata.
14731          */
14732         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14733             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14734             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14735                 tp->rx_std_max_post = 8;
14736
14737         if (tg3_flag(tp, ASPM_WORKAROUND))
14738                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14739                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
14740
14741         return err;
14742 }
14743
14744 #ifdef CONFIG_SPARC
14745 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14746 {
14747         struct net_device *dev = tp->dev;
14748         struct pci_dev *pdev = tp->pdev;
14749         struct device_node *dp = pci_device_to_OF_node(pdev);
14750         const unsigned char *addr;
14751         int len;
14752
14753         addr = of_get_property(dp, "local-mac-address", &len);
14754         if (addr && len == 6) {
14755                 memcpy(dev->dev_addr, addr, 6);
14756                 memcpy(dev->perm_addr, dev->dev_addr, 6);
14757                 return 0;
14758         }
14759         return -ENODEV;
14760 }
14761
14762 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14763 {
14764         struct net_device *dev = tp->dev;
14765
14766         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14767         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14768         return 0;
14769 }
14770 #endif
14771
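/* Determine the hardware MAC address, trying sources in order: an OF
 * "local-mac-address" property on SPARC, the bootcode mailbox in NIC
 * SRAM (signature 0x484b in the upper half of the high word), NVRAM
 * at a per-function offset, the MAC_ADDR_0 registers, and finally the
 * SPARC idprom.
 */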
14772 static int __devinit tg3_get_device_address(struct tg3 *tp)
14773 {
14774         struct net_device *dev = tp->dev;
14775         u32 hi, lo, mac_offset;
14776         int addr_ok = 0;
14777
14778 #ifdef CONFIG_SPARC
14779         if (!tg3_get_macaddr_sparc(tp))
14780                 return 0;
14781 #endif
14782
14783         mac_offset = 0x7c;
14784         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14785             tg3_flag(tp, 5780_CLASS)) {
14786                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14787                         mac_offset = 0xcc;
14788                 if (tg3_nvram_lock(tp))
14789                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14790                 else
14791                         tg3_nvram_unlock(tp);
14792         } else if (tg3_flag(tp, 5717_PLUS)) {
14793                 if (tp->pci_fn & 1)
14794                         mac_offset = 0xcc;
14795                 if (tp->pci_fn > 1)
14796                         mac_offset += 0x18c;
14797         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14798                 mac_offset = 0x10;
14799
14800         /* First try to get it from MAC address mailbox. */
14801         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
14802         if ((hi >> 16) == 0x484b) {
14803                 dev->dev_addr[0] = (hi >>  8) & 0xff;
14804                 dev->dev_addr[1] = (hi >>  0) & 0xff;
14805
14806                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14807                 dev->dev_addr[2] = (lo >> 24) & 0xff;
14808                 dev->dev_addr[3] = (lo >> 16) & 0xff;
14809                 dev->dev_addr[4] = (lo >>  8) & 0xff;
14810                 dev->dev_addr[5] = (lo >>  0) & 0xff;
14811
14812                 /* Some old bootcode may report a 0 MAC address in SRAM */
14813                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14814         }
14815         if (!addr_ok) {
14816                 /* Next, try NVRAM. */
14817                 if (!tg3_flag(tp, NO_NVRAM) &&
14818                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14819                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14820                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14821                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14822                 }
14823                 /* Finally just fetch it out of the MAC control regs. */
14824                 else {
14825                         hi = tr32(MAC_ADDR_0_HIGH);
14826                         lo = tr32(MAC_ADDR_0_LOW);
14827
14828                         dev->dev_addr[5] = lo & 0xff;
14829                         dev->dev_addr[4] = (lo >> 8) & 0xff;
14830                         dev->dev_addr[3] = (lo >> 16) & 0xff;
14831                         dev->dev_addr[2] = (lo >> 24) & 0xff;
14832                         dev->dev_addr[1] = hi & 0xff;
14833                         dev->dev_addr[0] = (hi >> 8) & 0xff;
14834                 }
14835         }
14836
14837         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14838 #ifdef CONFIG_SPARC
14839                 if (!tg3_get_default_macaddr_sparc(tp))
14840                         return 0;
14841 #endif
14842                 return -EINVAL;
14843         }
14844         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14845         return 0;
14846 }
14847
14848 #define BOUNDARY_SINGLE_CACHELINE       1
14849 #define BOUNDARY_MULTI_CACHELINE        2
14850
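/* Pick the DMA_RWCTRL read/write boundary bits.  Bursting across a
 * cacheline boundary makes many RISC host bridges disconnect, so the
 * per-architecture goal (single vs. multiple cachelines) is mapped
 * onto the encoding for the bus type in use.  As a sketch, a 64-byte
 * cacheline on plain PCI with BOUNDARY_SINGLE_CACHELINE yields:
 *
 *     val |= (DMA_RWCTRL_READ_BNDRY_64 | DMA_RWCTRL_WRITE_BNDRY_64);
 */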
14851 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14852 {
14853         int cacheline_size;
14854         u8 byte;
14855         int goal;
14856
14857         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14858         if (byte == 0)
14859                 cacheline_size = 1024;
14860         else
14861                 cacheline_size = (int) byte * 4;
14862
14863         /* On 5703 and later chips, the boundary bits have no
14864          * effect.
14865          */
14866         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14867             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14868             !tg3_flag(tp, PCI_EXPRESS))
14869                 goto out;
14870
14871 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14872         goal = BOUNDARY_MULTI_CACHELINE;
14873 #else
14874 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14875         goal = BOUNDARY_SINGLE_CACHELINE;
14876 #else
14877         goal = 0;
14878 #endif
14879 #endif
14880
14881         if (tg3_flag(tp, 57765_PLUS)) {
14882                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14883                 goto out;
14884         }
14885
14886         if (!goal)
14887                 goto out;
14888
14889         /* PCI controllers on most RISC systems tend to disconnect
14890          * when a device tries to burst across a cache-line boundary.
14891          * Therefore, letting tg3 do so just wastes PCI bandwidth.
14892          *
14893          * Unfortunately, for PCI-E there are only limited
14894          * write-side controls for this, and thus for reads
14895          * we will still get the disconnects.  We'll also waste
14896          * these PCI cycles for both read and write for chips
14897          * other than 5700 and 5701, which do not implement the
14898          * boundary bits.
14899          */
14900         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14901                 switch (cacheline_size) {
14902                 case 16:
14903                 case 32:
14904                 case 64:
14905                 case 128:
14906                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14907                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14908                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14909                         } else {
14910                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14911                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14912                         }
14913                         break;
14914
14915                 case 256:
14916                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14917                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14918                         break;
14919
14920                 default:
14921                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14922                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14923                         break;
14924                 }
14925         } else if (tg3_flag(tp, PCI_EXPRESS)) {
14926                 switch (cacheline_size) {
14927                 case 16:
14928                 case 32:
14929                 case 64:
14930                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14931                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14932                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14933                                 break;
14934                         }
14935                         /* fallthrough */
14936                 case 128:
14937                 default:
14938                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14939                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14940                         break;
14941                 }
14942         } else {
14943                 switch (cacheline_size) {
14944                 case 16:
14945                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14946                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14947                                         DMA_RWCTRL_WRITE_BNDRY_16);
14948                                 break;
14949                         }
14950                         /* fallthrough */
14951                 case 32:
14952                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14953                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14954                                         DMA_RWCTRL_WRITE_BNDRY_32);
14955                                 break;
14956                         }
14957                         /* fallthrough */
14958                 case 64:
14959                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14960                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14961                                         DMA_RWCTRL_WRITE_BNDRY_64);
14962                                 break;
14963                         }
14964                         /* fallthrough */
14965                 case 128:
14966                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14967                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
14968                                         DMA_RWCTRL_WRITE_BNDRY_128);
14969                                 break;
14970                         }
14971                         /* fallthrough */
14972                 case 256:
14973                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
14974                                 DMA_RWCTRL_WRITE_BNDRY_256);
14975                         break;
14976                 case 512:
14977                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
14978                                 DMA_RWCTRL_WRITE_BNDRY_512);
14979                         break;
14980                 case 1024:
14981                 default:
14982                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14983                                 DMA_RWCTRL_WRITE_BNDRY_1024);
14984                         break;
14985                 }
14986         }
14987
14988 out:
14989         return val;
14990 }
14991
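/*
 * Stage a single internal buffer descriptor into NIC SRAM through the
 * PCI memory window, enable the read- or write-side DMA engine
 * depending on @to_device, kick the corresponding FTQ, and then poll
 * the matching completion FIFO (up to 40 * 100 usec) for the
 * descriptor address.  Returns 0 on success, -ENODEV on timeout.
 */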
14992 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
14993 {
14994         struct tg3_internal_buffer_desc test_desc;
14995         u32 sram_dma_descs;
14996         int i, ret;
14997
14998         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
14999
15000         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
15001         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
15002         tw32(RDMAC_STATUS, 0);
15003         tw32(WDMAC_STATUS, 0);
15004
15005         tw32(BUFMGR_MODE, 0);
15006         tw32(FTQ_RESET, 0);
15007
15008         test_desc.addr_hi = ((u64) buf_dma) >> 32;
15009         test_desc.addr_lo = buf_dma & 0xffffffff;
15010         test_desc.nic_mbuf = 0x00002100;
15011         test_desc.len = size;
15012
15013          /*
15014           * HP ZX1 systems were seeing test failures for 5701 cards
15015           * running at 33MHz the *second* time the tg3 driver was loaded
15016           * after an initial scan.
15017          *
15018          * Broadcom tells me:
15019          *   ...the DMA engine is connected to the GRC block and a DMA
15020          *   reset may affect the GRC block in some unpredictable way...
15021          *   The behavior of resets to individual blocks has not been tested.
15022          *
15023          * Broadcom noted the GRC reset will also reset all sub-components.
15024          */
15025         if (to_device) {
15026                 test_desc.cqid_sqid = (13 << 8) | 2;
15027
15028                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
15029                 udelay(40);
15030         } else {
15031                 test_desc.cqid_sqid = (16 << 8) | 7;
15032
15033                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
15034                 udelay(40);
15035         }
15036         test_desc.flags = 0x00000005;
15037
15038         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
15039                 u32 val;
15040
15041                 val = *(((u32 *)&test_desc) + i);
15042                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
15043                                        sram_dma_descs + (i * sizeof(u32)));
15044                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
15045         }
15046         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
15047
15048         if (to_device)
15049                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
15050         else
15051                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
15052
15053         ret = -ENODEV;
15054         for (i = 0; i < 40; i++) {
15055                 u32 val;
15056
15057                 if (to_device)
15058                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
15059                 else
15060                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
15061                 if ((val & 0xffff) == sram_dma_descs) {
15062                         ret = 0;
15063                         break;
15064                 }
15065
15066                 udelay(100);
15067         }
15068
15069         return ret;
15070 }
15071
15072 #define TEST_BUFFER_SIZE        0x2000  /* 8 KiB DMA test buffer */
15073
15074 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
15075         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
15076         { },
15077 };
15078
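/*
 * Strategy of the test below: run with the maximum write burst size
 * (write boundary disabled) to expose the 5700/5701 write DMA bug,
 * DMA a counting pattern to the chip and back, and verify it.  On
 * corruption, clamp the write boundary to 16 bytes and retry.  If the
 * test passes at full burst, still clamp on chipsets listed in
 * tg3_dma_wait_state_chipsets, which are known to expose the bug
 * without failing this test.
 */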
15079 static int __devinit tg3_test_dma(struct tg3 *tp)
15080 {
15081         dma_addr_t buf_dma;
15082         u32 *buf, saved_dma_rwctrl;
15083         int ret = 0;
15084
15085         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
15086                                  &buf_dma, GFP_KERNEL);
15087         if (!buf) {
15088                 ret = -ENOMEM;
15089                 goto out_nofree;
15090         }
15091
15092         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
15093                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
15094
15095         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
15096
15097         if (tg3_flag(tp, 57765_PLUS))
15098                 goto out;
15099
15100         if (tg3_flag(tp, PCI_EXPRESS)) {
15101                 /* DMA read watermark not used on PCIE */
15102                 tp->dma_rwctrl |= 0x00180000;
15103         } else if (!tg3_flag(tp, PCIX_MODE)) {
15104                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
15105                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
15106                         tp->dma_rwctrl |= 0x003f0000;
15107                 else
15108                         tp->dma_rwctrl |= 0x003f000f;
15109         } else {
15110                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15111                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
15112                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
15113                         u32 read_water = 0x7;
15114
15115                         /* If the 5704 is behind the EPB bridge, we can
15116                          * do the less restrictive ONE_DMA workaround for
15117                          * better performance.
15118                          */
15119                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
15120                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15121                                 tp->dma_rwctrl |= 0x8000;
15122                         else if (ccval == 0x6 || ccval == 0x7)
15123                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
15124
15125                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
15126                                 read_water = 4;
15127                         /* Set bit 23 to enable PCIX hw bug fix */
15128                         tp->dma_rwctrl |=
15129                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
15130                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
15131                                 (1 << 23);
15132                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
15133                         /* 5780 always in PCIX mode */
15134                         tp->dma_rwctrl |= 0x00144000;
15135                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
15136                         /* 5714 always in PCIX mode */
15137                         tp->dma_rwctrl |= 0x00148000;
15138                 } else {
15139                         tp->dma_rwctrl |= 0x001b000f;
15140                 }
15141         }
15142
15143         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15144             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15145                 tp->dma_rwctrl &= 0xfffffff0;
15146
15147         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15148             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
15149                 /* Remove this if it causes problems for some boards. */
15150                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
15151
15152                 /* On 5700/5701 chips, we need to set this bit.
15153                  * Otherwise the chip will issue cacheline transactions
15154                  * to streamable DMA memory without all of the byte
15155                  * enables asserted.  This is an error on several
15156                  * RISC PCI controllers, in particular sparc64.
15157                  *
15158                  * On 5703/5704 chips, this bit has been reassigned
15159                  * a different meaning.  In particular, it is used
15160                  * on those chips to enable a PCI-X workaround.
15161                  */
15162                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
15163         }
15164
15165         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15166
15167 #if 0
15168         /* Unneeded, already done by tg3_get_invariants.  */
15169         tg3_switch_clocks(tp);
15170 #endif
15171
15172         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15173             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
15174                 goto out;
15175
15176         /* It is best to perform the DMA test with the maximum write
15177          * burst size to expose the 5700/5701 write DMA bug.
15178          */
15179         saved_dma_rwctrl = tp->dma_rwctrl;
15180         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15181         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15182
15183         while (1) {
15184                 u32 *p = buf, i;
15185
15186                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
15187                         p[i] = i;
15188
15189                 /* Send the buffer to the chip. */
15190                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
15191                 if (ret) {
15192                         dev_err(&tp->pdev->dev,
15193                                 "%s: Buffer write failed. err = %d\n",
15194                                 __func__, ret);
15195                         break;
15196                 }
15197
15198 #if 0
15199                 /* Validate that the data reached card RAM correctly. */
15200                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15201                         u32 val;
15202                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
15203                         if (le32_to_cpu(val) != p[i]) {
15204                                 dev_err(&tp->pdev->dev,
15205                                         "%s: Buffer corrupted on device! "
15206                                         "(%d != %d)\n", __func__, val, i);
15207                                 /* ret = -ENODEV here? */
15208                         }
15209                         p[i] = 0;
15210                 }
15211 #endif
15212                 /* Now read it back. */
15213                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
15214                 if (ret) {
15215                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
15216                                 "err = %d\n", __func__, ret);
15217                         break;
15218                 }
15219
15220                 /* Verify it. */
15221                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15222                         if (p[i] == i)
15223                                 continue;
15224
15225                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15226                             DMA_RWCTRL_WRITE_BNDRY_16) {
15227                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15228                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15229                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15230                                 break;
15231                         } else {
15232                                 dev_err(&tp->pdev->dev,
15233                                         "%s: Buffer corrupted on read back! "
15234                                         "(%d != %d)\n", __func__, p[i], i);
15235                                 ret = -ENODEV;
15236                                 goto out;
15237                         }
15238                 }
15239
15240                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
15241                         /* Success. */
15242                         ret = 0;
15243                         break;
15244                 }
15245         }
15246         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15247             DMA_RWCTRL_WRITE_BNDRY_16) {
15248                 /* The DMA test passed without adjusting the DMA boundary;
15249                  * now look for chipsets that are known to expose the
15250                  * DMA bug without failing the test.
15251                  */
15252                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
15253                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15254                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15255                 } else {
15256                         /* Safe to use the calculated DMA boundary. */
15257                         tp->dma_rwctrl = saved_dma_rwctrl;
15258                 }
15259
15260                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15261         }
15262
15263 out:
15264         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
15265 out_nofree:
15266         return ret;
15267 }
15268
15269 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15270 {
15271         if (tg3_flag(tp, 57765_PLUS)) {
15272                 tp->bufmgr_config.mbuf_read_dma_low_water =
15273                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15274                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15275                         DEFAULT_MB_MACRX_LOW_WATER_57765;
15276                 tp->bufmgr_config.mbuf_high_water =
15277                         DEFAULT_MB_HIGH_WATER_57765;
15278
15279                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15280                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15281                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15282                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15283                 tp->bufmgr_config.mbuf_high_water_jumbo =
15284                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
15285         } else if (tg3_flag(tp, 5705_PLUS)) {
15286                 tp->bufmgr_config.mbuf_read_dma_low_water =
15287                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15288                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15289                         DEFAULT_MB_MACRX_LOW_WATER_5705;
15290                 tp->bufmgr_config.mbuf_high_water =
15291                         DEFAULT_MB_HIGH_WATER_5705;
15292                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15293                         tp->bufmgr_config.mbuf_mac_rx_low_water =
15294                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
15295                         tp->bufmgr_config.mbuf_high_water =
15296                                 DEFAULT_MB_HIGH_WATER_5906;
15297                 }
15298
15299                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15300                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15301                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15302                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15303                 tp->bufmgr_config.mbuf_high_water_jumbo =
15304                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15305         } else {
15306                 tp->bufmgr_config.mbuf_read_dma_low_water =
15307                         DEFAULT_MB_RDMA_LOW_WATER;
15308                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15309                         DEFAULT_MB_MACRX_LOW_WATER;
15310                 tp->bufmgr_config.mbuf_high_water =
15311                         DEFAULT_MB_HIGH_WATER;
15312
15313                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15314                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15315                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15316                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15317                 tp->bufmgr_config.mbuf_high_water_jumbo =
15318                         DEFAULT_MB_HIGH_WATER_JUMBO;
15319         }
15320
15321         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15322         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15323 }
15324
15325 static char * __devinit tg3_phy_string(struct tg3 *tp)
15326 {
15327         switch (tp->phy_id & TG3_PHY_ID_MASK) {
15328         case TG3_PHY_ID_BCM5400:        return "5400";
15329         case TG3_PHY_ID_BCM5401:        return "5401";
15330         case TG3_PHY_ID_BCM5411:        return "5411";
15331         case TG3_PHY_ID_BCM5701:        return "5701";
15332         case TG3_PHY_ID_BCM5703:        return "5703";
15333         case TG3_PHY_ID_BCM5704:        return "5704";
15334         case TG3_PHY_ID_BCM5705:        return "5705";
15335         case TG3_PHY_ID_BCM5750:        return "5750";
15336         case TG3_PHY_ID_BCM5752:        return "5752";
15337         case TG3_PHY_ID_BCM5714:        return "5714";
15338         case TG3_PHY_ID_BCM5780:        return "5780";
15339         case TG3_PHY_ID_BCM5755:        return "5755";
15340         case TG3_PHY_ID_BCM5787:        return "5787";
15341         case TG3_PHY_ID_BCM5784:        return "5784";
15342         case TG3_PHY_ID_BCM5756:        return "5722/5756";
15343         case TG3_PHY_ID_BCM5906:        return "5906";
15344         case TG3_PHY_ID_BCM5761:        return "5761";
15345         case TG3_PHY_ID_BCM5718C:       return "5718C";
15346         case TG3_PHY_ID_BCM5718S:       return "5718S";
15347         case TG3_PHY_ID_BCM57765:       return "57765";
15348         case TG3_PHY_ID_BCM5719C:       return "5719C";
15349         case TG3_PHY_ID_BCM5720C:       return "5720C";
15350         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
15351         case 0:                 return "serdes";
15352         default:                return "unknown";
15353         }
15354 }
15355
15356 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15357 {
15358         if (tg3_flag(tp, PCI_EXPRESS)) {
15359                 strcpy(str, "PCI Express");
15360                 return str;
15361         } else if (tg3_flag(tp, PCIX_MODE)) {
15362                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15363
15364                 strcpy(str, "PCIX:");
15365
15366                 if ((clock_ctrl == 7) ||
15367                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15368                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15369                         strcat(str, "133MHz");
15370                 else if (clock_ctrl == 0)
15371                         strcat(str, "33MHz");
15372                 else if (clock_ctrl == 2)
15373                         strcat(str, "50MHz");
15374                 else if (clock_ctrl == 4)
15375                         strcat(str, "66MHz");
15376                 else if (clock_ctrl == 6)
15377                         strcat(str, "100MHz");
15378         } else {
15379                 strcpy(str, "PCI:");
15380                 if (tg3_flag(tp, PCI_HIGH_SPEED))
15381                         strcat(str, "66MHz");
15382                 else
15383                         strcat(str, "33MHz");
15384         }
15385         if (tg3_flag(tp, PCI_32BIT))
15386                 strcat(str, ":32-bit");
15387         else
15388                 strcat(str, ":64-bit");
15389         return str;
15390 }
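/*
 * Example strings produced above: "PCI Express" (returned early, no
 * width suffix), "PCIX:133MHz:64-bit", "PCI:66MHz:32-bit".  Callers
 * must pass a buffer large enough for the longest case (tg3_init_one()
 * uses char str[40]).
 */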
15391
15392 static void __devinit tg3_init_coal(struct tg3 *tp)
15393 {
15394         struct ethtool_coalesce *ec = &tp->coal;
15395
15396         memset(ec, 0, sizeof(*ec));
15397         ec->cmd = ETHTOOL_GCOALESCE;
15398         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15399         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15400         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15401         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15402         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15403         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15404         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15405         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15406         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15407
15408         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15409                                  HOSTCC_MODE_CLRTICK_TXBD)) {
15410                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15411                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15412                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15413                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15414         }
15415
15416         if (tg3_flag(tp, 5705_PLUS)) {
15417                 ec->rx_coalesce_usecs_irq = 0;
15418                 ec->tx_coalesce_usecs_irq = 0;
15419                 ec->stats_block_coalesce_usecs = 0;
15420         }
15421 }
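/*
 * These defaults are what "ethtool -c <dev>" reports before any user
 * tuning; e.g. ec->rx_coalesce_usecs surfaces as rx-usecs and
 * ec->rx_max_coalesced_frames as rx-frames in the standard ethtool
 * coalesce interface.
 */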
15422
15423 static int __devinit tg3_init_one(struct pci_dev *pdev,
15424                                   const struct pci_device_id *ent)
15425 {
15426         struct net_device *dev;
15427         struct tg3 *tp;
15428         int i, err, pm_cap;
15429         u32 sndmbx, rcvmbx, intmbx;
15430         char str[40];
15431         u64 dma_mask, persist_dma_mask;
15432         netdev_features_t features = 0;
15433
15434         printk_once(KERN_INFO "%s\n", version);
15435
15436         err = pci_enable_device(pdev);
15437         if (err) {
15438                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15439                 return err;
15440         }
15441
15442         err = pci_request_regions(pdev, DRV_MODULE_NAME);
15443         if (err) {
15444                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15445                 goto err_out_disable_pdev;
15446         }
15447
15448         pci_set_master(pdev);
15449
15450         /* Find power-management capability. */
15451         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15452         if (pm_cap == 0) {
15453                 dev_err(&pdev->dev,
15454                         "Cannot find Power Management capability, aborting\n");
15455                 err = -EIO;
15456                 goto err_out_free_res;
15457         }
15458
15459         err = pci_set_power_state(pdev, PCI_D0);
15460         if (err) {
15461                 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15462                 goto err_out_free_res;
15463         }
15464
15465         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15466         if (!dev) {
15467                 err = -ENOMEM;
15468                 goto err_out_power_down;
15469         }
15470
15471         SET_NETDEV_DEV(dev, &pdev->dev);
15472
15473         tp = netdev_priv(dev);
15474         tp->pdev = pdev;
15475         tp->dev = dev;
15476         tp->pm_cap = pm_cap;
15477         tp->rx_mode = TG3_DEF_RX_MODE;
15478         tp->tx_mode = TG3_DEF_TX_MODE;
15479
15480         if (tg3_debug > 0)
15481                 tp->msg_enable = tg3_debug;
15482         else
15483                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
15484
15485         /* The word/byte swap controls set here govern register-access
15486          * byte swapping.  DMA data byte swapping is controlled by the
15487          * GRC_MODE setting below.
15488          */
15489         tp->misc_host_ctrl =
15490                 MISC_HOST_CTRL_MASK_PCI_INT |
15491                 MISC_HOST_CTRL_WORD_SWAP |
15492                 MISC_HOST_CTRL_INDIR_ACCESS |
15493                 MISC_HOST_CTRL_PCISTATE_RW;
15494
15495         /* The NONFRM (non-frame) byte/word swap controls take effect
15496          * on descriptor entries, i.e. anything that isn't packet data.
15497          *
15498          * The StrongARM chips on the board (one for tx, one for rx)
15499          * are running in big-endian mode.
15500          */
15501         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15502                         GRC_MODE_WSWAP_NONFRM_DATA);
15503 #ifdef __BIG_ENDIAN
15504         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15505 #endif
15506         spin_lock_init(&tp->lock);
15507         spin_lock_init(&tp->indirect_lock);
15508         INIT_WORK(&tp->reset_task, tg3_reset_task);
15509
15510         tp->regs = pci_ioremap_bar(pdev, BAR_0);
15511         if (!tp->regs) {
15512                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15513                 err = -ENOMEM;
15514                 goto err_out_free_dev;
15515         }
15516
15517         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15518             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
15519             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
15520             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
15521             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15522             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15523             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15524             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
15525                 tg3_flag_set(tp, ENABLE_APE);
15526                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15527                 if (!tp->aperegs) {
15528                         dev_err(&pdev->dev,
15529                                 "Cannot map APE registers, aborting\n");
15530                         err = -ENOMEM;
15531                         goto err_out_iounmap;
15532                 }
15533         }
15534
15535         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15536         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15537
15538         dev->ethtool_ops = &tg3_ethtool_ops;
15539         dev->watchdog_timeo = TG3_TX_TIMEOUT;
15540         dev->netdev_ops = &tg3_netdev_ops;
15541         dev->irq = pdev->irq;
15542
15543         err = tg3_get_invariants(tp);
15544         if (err) {
15545                 dev_err(&pdev->dev,
15546                         "Problem fetching invariants of chip, aborting\n");
15547                 goto err_out_apeunmap;
15548         }
15549
15550         /* The EPB bridge inside 5714, 5715, and 5780 and any
15551          * device behind the EPB cannot support DMA addresses wider than 40 bits.
15552          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15553          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15554          * do DMA address check in tg3_start_xmit().
15555          */
15556         if (tg3_flag(tp, IS_5788))
15557                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15558         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15559                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15560 #ifdef CONFIG_HIGHMEM
15561                 dma_mask = DMA_BIT_MASK(64);
15562 #endif
15563         } else
15564                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
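        /* For reference, DMA_BIT_MASK(n) is a mask with the low n bits
         * set, e.g. DMA_BIT_MASK(32) == 0xffffffff and
         * DMA_BIT_MASK(40) == 0xffffffffff, bounding the highest bus
         * address the device will be handed.
         */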
15565
15566         /* Configure DMA attributes. */
15567         if (dma_mask > DMA_BIT_MASK(32)) {
15568                 err = pci_set_dma_mask(pdev, dma_mask);
15569                 if (!err) {
15570                         features |= NETIF_F_HIGHDMA;
15571                         err = pci_set_consistent_dma_mask(pdev,
15572                                                           persist_dma_mask);
15573                         if (err < 0) {
15574                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15575                                         "DMA for consistent allocations\n");
15576                                 goto err_out_apeunmap;
15577                         }
15578                 }
15579         }
15580         if (err || dma_mask == DMA_BIT_MASK(32)) {
15581                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15582                 if (err) {
15583                         dev_err(&pdev->dev,
15584                                 "No usable DMA configuration, aborting\n");
15585                         goto err_out_apeunmap;
15586                 }
15587         }
15588
15589         tg3_init_bufmgr_config(tp);
15590
15591         features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15592
15593         /* 5700 B0 chips do not support checksumming correctly due
15594          * to hardware bugs.
15595          */
15596         if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15597                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15598
15599                 if (tg3_flag(tp, 5755_PLUS))
15600                         features |= NETIF_F_IPV6_CSUM;
15601         }
15602
15603         /* TSO is on by default on chips that support hardware TSO.
15604          * Firmware TSO on older chips gives lower performance, so it
15605          * is off by default, but can be enabled using ethtool.
15606          */
15607         if ((tg3_flag(tp, HW_TSO_1) ||
15608              tg3_flag(tp, HW_TSO_2) ||
15609              tg3_flag(tp, HW_TSO_3)) &&
15610             (features & NETIF_F_IP_CSUM))
15611                 features |= NETIF_F_TSO;
15612         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15613                 if (features & NETIF_F_IPV6_CSUM)
15614                         features |= NETIF_F_TSO6;
15615                 if (tg3_flag(tp, HW_TSO_3) ||
15616                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15617                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15618                      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15619                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15620                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15621                         features |= NETIF_F_TSO_ECN;
15622         }
15623
15624         dev->features |= features;
15625         dev->vlan_features |= features;
15626
15627         /*
15628          * Add loopback capability only for a subset of devices that support
15629          * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
15630          * loopback for the remaining devices.
15631          */
15632         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15633             !tg3_flag(tp, CPMU_PRESENT))
15634                 /* Add the loopback capability */
15635                 features |= NETIF_F_LOOPBACK;
15636
15637         dev->hw_features |= features;
15638
15639         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15640             !tg3_flag(tp, TSO_CAPABLE) &&
15641             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15642                 tg3_flag_set(tp, MAX_RXPEND_64);
15643                 tp->rx_pending = 63;
15644         }
15645
15646         err = tg3_get_device_address(tp);
15647         if (err) {
15648                 dev_err(&pdev->dev,
15649                         "Could not obtain valid ethernet address, aborting\n");
15650                 goto err_out_apeunmap;
15651         }
15652
15653         /*
15654          * Reset the chip in case a UNDI or EFI driver did not shut it
15655          * down cleanly.  The DMA self test will enable WDMAC and we'll
15656          * see (spurious) pending DMA on the PCI bus at that point.
15657          */
15658         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15659             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15660                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15661                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15662         }
15663
15664         err = tg3_test_dma(tp);
15665         if (err) {
15666                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15667                 goto err_out_apeunmap;
15668         }
15669
15670         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15671         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15672         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15673         for (i = 0; i < tp->irq_max; i++) {
15674                 struct tg3_napi *tnapi = &tp->napi[i];
15675
15676                 tnapi->tp = tp;
15677                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15678
15679                 tnapi->int_mbox = intmbx;
15680                 if (i <= 4)
15681                         intmbx += 0x8;
15682                 else
15683                         intmbx += 0x4;
15684
15685                 tnapi->consmbox = rcvmbx;
15686                 tnapi->prodmbox = sndmbx;
15687
15688                 if (i)
15689                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15690                 else
15691                         tnapi->coal_now = HOSTCC_MODE_NOW;
15692
15693                 if (!tg3_flag(tp, SUPPORT_MSIX))
15694                         break;
15695
15696                 /*
15697                  * If we support MSIX, we'll be using RSS.  If we're using
15698                  * RSS, the first vector only handles link interrupts and the
15699                  * remaining vectors handle rx and tx interrupts.  Reuse the
15700          * mailbox values for the next iteration.  The values we set up
15701                  * above are still useful for the single vectored mode.
15702                  */
15703                 if (!i)
15704                         continue;
15705
15706                 rcvmbx += 0x8;
15707
15708                 if (sndmbx & 0x4)
15709                         sndmbx -= 0x4;
15710                 else
15711                         sndmbx += 0xc;
15712         }
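        /*
         * Illustration of the stepping above (symbolic; S is the initial
         * sndmbx and is assumed to have bit 2 set): vectors 0 and 1
         * share the base rcv/snd mailboxes, then rcvmbx advances by 0x8
         * per vector while sndmbx alternates -0x4/+0xc, visiting both
         * 32-bit halves of consecutive 64-bit mailbox registers:
         * S, S-4, S+8, S+4, S+16, S+12, ...
         */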
15713
15714         tg3_init_coal(tp);
15715
15716         pci_set_drvdata(pdev, dev);
15717
15718         if (tg3_flag(tp, 5717_PLUS)) {
15719                 /* Resume from a low-power mode */
15720                 tg3_frob_aux_power(tp, false);
15721         }
15722
15723         err = register_netdev(dev);
15724         if (err) {
15725                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15726                 goto err_out_apeunmap;
15727         }
15728
15729         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15730                     tp->board_part_number,
15731                     tp->pci_chip_rev_id,
15732                     tg3_bus_string(tp, str),
15733                     dev->dev_addr);
15734
15735         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15736                 struct phy_device *phydev;
15737                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15738                 netdev_info(dev,
15739                             "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15740                             phydev->drv->name, dev_name(&phydev->dev));
15741         } else {
15742                 char *ethtype;
15743
15744                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15745                         ethtype = "10/100Base-TX";
15746                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15747                         ethtype = "1000Base-SX";
15748                 else
15749                         ethtype = "10/100/1000Base-T";
15750
15751                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15752                             "(WireSpeed[%d], EEE[%d])\n",
15753                             tg3_phy_string(tp), ethtype,
15754                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15755                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15756         }
15757
15758         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15759                     (dev->features & NETIF_F_RXCSUM) != 0,
15760                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
15761                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15762                     tg3_flag(tp, ENABLE_ASF) != 0,
15763                     tg3_flag(tp, TSO_CAPABLE) != 0);
15764         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15765                     tp->dma_rwctrl,
15766                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15767                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15768
15769         pci_save_state(pdev);
15770
15771         return 0;
15772
15773 err_out_apeunmap:
15774         if (tp->aperegs) {
15775                 iounmap(tp->aperegs);
15776                 tp->aperegs = NULL;
15777         }
15778
15779 err_out_iounmap:
15780         if (tp->regs) {
15781                 iounmap(tp->regs);
15782                 tp->regs = NULL;
15783         }
15784
15785 err_out_free_dev:
15786         free_netdev(dev);
15787
15788 err_out_power_down:
15789         pci_set_power_state(pdev, PCI_D3hot);
15790
15791 err_out_free_res:
15792         pci_release_regions(pdev);
15793
15794 err_out_disable_pdev:
15795         pci_disable_device(pdev);
15796         pci_set_drvdata(pdev, NULL);
15797         return err;
15798 }
15799
15800 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15801 {
15802         struct net_device *dev = pci_get_drvdata(pdev);
15803
15804         if (dev) {
15805                 struct tg3 *tp = netdev_priv(dev);
15806
15807                 if (tp->fw)
15808                         release_firmware(tp->fw);
15809
15810                 tg3_reset_task_cancel(tp);
15811
15812                 if (tg3_flag(tp, USE_PHYLIB)) {
15813                         tg3_phy_fini(tp);
15814                         tg3_mdio_fini(tp);
15815                 }
15816
15817                 unregister_netdev(dev);
15818                 if (tp->aperegs) {
15819                         iounmap(tp->aperegs);
15820                         tp->aperegs = NULL;
15821                 }
15822                 if (tp->regs) {
15823                         iounmap(tp->regs);
15824                         tp->regs = NULL;
15825                 }
15826                 free_netdev(dev);
15827                 pci_release_regions(pdev);
15828                 pci_disable_device(pdev);
15829                 pci_set_drvdata(pdev, NULL);
15830         }
15831 }
15832
15833 #ifdef CONFIG_PM_SLEEP
15834 static int tg3_suspend(struct device *device)
15835 {
15836         struct pci_dev *pdev = to_pci_dev(device);
15837         struct net_device *dev = pci_get_drvdata(pdev);
15838         struct tg3 *tp = netdev_priv(dev);
15839         int err;
15840
15841         if (!netif_running(dev))
15842                 return 0;
15843
15844         tg3_reset_task_cancel(tp);
15845         tg3_phy_stop(tp);
15846         tg3_netif_stop(tp);
15847
15848         del_timer_sync(&tp->timer);
15849
15850         tg3_full_lock(tp, 1);
15851         tg3_disable_ints(tp);
15852         tg3_full_unlock(tp);
15853
15854         netif_device_detach(dev);
15855
15856         tg3_full_lock(tp, 0);
15857         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15858         tg3_flag_clear(tp, INIT_COMPLETE);
15859         tg3_full_unlock(tp);
15860
15861         err = tg3_power_down_prepare(tp);
15862         if (err) {
15863                 int err2;
15864
15865                 tg3_full_lock(tp, 0);
15866
15867                 tg3_flag_set(tp, INIT_COMPLETE);
15868                 err2 = tg3_restart_hw(tp, 1);
15869                 if (err2)
15870                         goto out;
15871
15872                 tp->timer.expires = jiffies + tp->timer_offset;
15873                 add_timer(&tp->timer);
15874
15875                 netif_device_attach(dev);
15876                 tg3_netif_start(tp);
15877
15878 out:
15879                 tg3_full_unlock(tp);
15880
15881                 if (!err2)
15882                         tg3_phy_start(tp);
15883         }
15884
15885         return err;
15886 }
15887
15888 static int tg3_resume(struct device *device)
15889 {
15890         struct pci_dev *pdev = to_pci_dev(device);
15891         struct net_device *dev = pci_get_drvdata(pdev);
15892         struct tg3 *tp = netdev_priv(dev);
15893         int err;
15894
15895         if (!netif_running(dev))
15896                 return 0;
15897
15898         netif_device_attach(dev);
15899
15900         tg3_full_lock(tp, 0);
15901
15902         tg3_flag_set(tp, INIT_COMPLETE);
15903         err = tg3_restart_hw(tp, 1);
15904         if (err)
15905                 goto out;
15906
15907         tp->timer.expires = jiffies + tp->timer_offset;
15908         add_timer(&tp->timer);
15909
15910         tg3_netif_start(tp);
15911
15912 out:
15913         tg3_full_unlock(tp);
15914
15915         if (!err)
15916                 tg3_phy_start(tp);
15917
15918         return err;
15919 }
15920
15921 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15922 #define TG3_PM_OPS (&tg3_pm_ops)
15923
15924 #else
15925
15926 #define TG3_PM_OPS NULL
15927
15928 #endif /* CONFIG_PM_SLEEP */
15929
15930 /**
15931  * tg3_io_error_detected - called when PCI error is detected
15932  * @pdev: Pointer to PCI device
15933  * @state: The current PCI connection state
15934  *
15935  * This function is called after a PCI bus error affecting
15936  * this device has been detected.
15937  */
15938 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15939                                               pci_channel_state_t state)
15940 {
15941         struct net_device *netdev = pci_get_drvdata(pdev);
15942         struct tg3 *tp = netdev_priv(netdev);
15943         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15944
15945         netdev_info(netdev, "PCI I/O error detected\n");
15946
15947         rtnl_lock();
15948
15949         if (!netif_running(netdev))
15950                 goto done;
15951
15952         tg3_phy_stop(tp);
15953
15954         tg3_netif_stop(tp);
15955
15956         del_timer_sync(&tp->timer);
15957
15958         /* Want to make sure that the reset task doesn't run */
15959         tg3_reset_task_cancel(tp);
15960         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
15961
15962         netif_device_detach(netdev);
15963
15964         /* Clean up software state, even if MMIO is blocked */
15965         tg3_full_lock(tp, 0);
15966         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
15967         tg3_full_unlock(tp);
15968
15969 done:
15970         if (state == pci_channel_io_perm_failure)
15971                 err = PCI_ERS_RESULT_DISCONNECT;
15972         else
15973                 pci_disable_device(pdev);
15974
15975         rtnl_unlock();
15976
15977         return err;
15978 }
15979
15980 /**
15981  * tg3_io_slot_reset - called after the PCI bus has been reset.
15982  * @pdev: Pointer to PCI device
15983  *
15984  * Restart the card from scratch, as if from a cold boot.
15985  * At this point, the card has experienced a hard reset,
15986  * followed by fixups by BIOS, and has its config space
15987  * set up identically to what it was at cold boot.
15988  */
15989 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
15990 {
15991         struct net_device *netdev = pci_get_drvdata(pdev);
15992         struct tg3 *tp = netdev_priv(netdev);
15993         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15994         int err;
15995
15996         rtnl_lock();
15997
15998         if (pci_enable_device(pdev)) {
15999                 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
16000                 goto done;
16001         }
16002
16003         pci_set_master(pdev);
16004         pci_restore_state(pdev);
16005         pci_save_state(pdev);
16006
16007         if (!netif_running(netdev)) {
16008                 rc = PCI_ERS_RESULT_RECOVERED;
16009                 goto done;
16010         }
16011
16012         err = tg3_power_up(tp);
16013         if (err)
16014                 goto done;
16015
16016         rc = PCI_ERS_RESULT_RECOVERED;
16017
16018 done:
16019         rtnl_unlock();
16020
16021         return rc;
16022 }
16023
16024 /**
16025  * tg3_io_resume - called when traffic can start flowing again.
16026  * @pdev: Pointer to PCI device
16027  *
16028  * This callback is called when the error recovery driver tells
16029  * us that it's OK to resume normal operation.
16030  */
16031 static void tg3_io_resume(struct pci_dev *pdev)
16032 {
16033         struct net_device *netdev = pci_get_drvdata(pdev);
16034         struct tg3 *tp = netdev_priv(netdev);
16035         int err;
16036
16037         rtnl_lock();
16038
16039         if (!netif_running(netdev))
16040                 goto done;
16041
16042         tg3_full_lock(tp, 0);
16043         tg3_flag_set(tp, INIT_COMPLETE);
16044         err = tg3_restart_hw(tp, 1);
16045         tg3_full_unlock(tp);
16046         if (err) {
16047                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
16048                 goto done;
16049         }
16050
16051         netif_device_attach(netdev);
16052
16053         tp->timer.expires = jiffies + tp->timer_offset;
16054         add_timer(&tp->timer);
16055
16056         tg3_netif_start(tp);
16057
16058         tg3_phy_start(tp);
16059
16060 done:
16061         rtnl_unlock();
16062 }
16063
16064 static struct pci_error_handlers tg3_err_handler = {
16065         .error_detected = tg3_io_error_detected,
16066         .slot_reset     = tg3_io_slot_reset,
16067         .resume         = tg3_io_resume
16068 };
16069
16070 static struct pci_driver tg3_driver = {
16071         .name           = DRV_MODULE_NAME,
16072         .id_table       = tg3_pci_tbl,
16073         .probe          = tg3_init_one,
16074         .remove         = __devexit_p(tg3_remove_one),
16075         .err_handler    = &tg3_err_handler,
16076         .driver.pm      = TG3_PM_OPS,
16077 };
16078
16079 static int __init tg3_init(void)
16080 {
16081         return pci_register_driver(&tg3_driver);
16082 }
16083
16084 static void __exit tg3_cleanup(void)
16085 {
16086         pci_unregister_driver(&tg3_driver);
16087 }
16088
16089 module_init(tg3_init);
16090 module_exit(tg3_cleanup);